VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 21337

Last change on this file since 21337 was 21337, checked in by vboxsync, 16 years ago

IPRT,HostDrv,AddDrv: Export public IPRT symbols for the linux kernel (pain).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 51.8 KB
Line 
1/* $Id: VBoxGuest.cpp 21337 2009-07-07 14:58:27Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_WITH_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
53/**
54 * Reserves memory in which the VMM can relocate any guest mappings
55 * that are floating around.
56 *
57 * This operation is a little bit tricky since the VMM might not accept
58 * just any address because of address clashes between the three contexts
59 * it operates in, so use a small stack to perform this operation.
60 *
61 * @returns VBox status code (ignored).
62 * @param pDevExt The device extension.
63 */
64static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
65{
66 /** @todo implement this using RTR0MemObjReserveKernel() (it needs to be implemented everywhere too). */
67 return VINF_SUCCESS;
68}
69
70
71/**
72 * Initializes the interrupt filter mask.
73 *
74 * This will ASSUME that we're the ones in charge of the mask, so
75 * we'll simply clear all bits we don't set.
76 *
77 * @returns VBox status code (ignored).
78 * @param pDevExt The device extension.
79 * @param fMask The new mask.
80 */
81static int vboxGuestInitFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
82{
83 VMMDevCtlGuestFilterMask *pReq;
84 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
85 if (RT_SUCCESS(rc))
86 {
87 pReq->u32OrMask = fMask;
88 pReq->u32NotMask = ~fMask; /* It's an AND mask. */
89 rc = VbglGRPerform(&pReq->header);
90 if ( RT_FAILURE(rc)
91 || RT_FAILURE(pReq->header.rc))
92 LogRel(("vboxGuestInitFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
93 rc, pReq->header.rc));
94 VbglGRFree(&pReq->header);
95 }
96 return rc;
97}
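/*
 * Worked example of the mask arithmetic above, assuming the host applies the
 * request as newMask = (oldMask | u32OrMask) & ~u32NotMask (the code only says
 * the Not mask is used as an AND mask, so the exact rule is an assumption).
 * With u32OrMask = fMask and u32NotMask = ~fMask:
 *
 *      newMask = (oldMask | fMask) & ~(~fMask)
 *              = (oldMask | fMask) &  fMask
 *              =  fMask
 *
 * i.e. every bit not in fMask is cleared, matching the "clear all bits we
 * don't set" intent documented above.
 */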
98
99
100/**
101 * Report guest information to the VMMDev.
102 *
103 * @returns VBox status code.
104 * @param pDevExt The device extension.
105 * @param enmOSType The OS type to report.
106 */
107static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
108{
109 VMMDevReportGuestInfo *pReq;
110 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
111 if (RT_SUCCESS(rc))
112 {
113 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
114 pReq->guestInfo.osType = enmOSType;
115 rc = VbglGRPerform(&pReq->header);
116 if ( RT_FAILURE(rc)
117 || RT_FAILURE(pReq->header.rc))
118 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
119 rc, pReq->header.rc));
120 VbglGRFree(&pReq->header);
121 }
122 return rc;
123}
124
125
126/**
127 * Initializes the VBoxGuest device extension when the
128 * device driver is loaded.
129 *
130 * The native code locates the VMMDev on the PCI bus and retrieves
131 * the MMIO and I/O port ranges; this function will take care of
132 * mapping the MMIO memory (if present). Upon successful return
133 * the native code should set up the interrupt handler.
134 *
135 * @returns VBox status code.
136 *
137 * @param pDevExt The device extension. Allocated by the native code.
138 * @param IOPortBase The base of the I/O port range.
139 * @param pvMMIOBase The base of the MMIO memory mapping.
140 * This is optional, pass NULL if not present.
141 * @param cbMMIO The size of the MMIO memory mapping.
142 * This is optional, pass 0 if not present.
143 * @param enmOSType The guest OS type to report to the VMMDev.
144 * @param fEvents Additional requested events (like Mouse events).
145 */
146int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
147 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fEvents)
148{
149 int rc, rc2;
150
151 /*
152 * Initialize the data.
153 */
154 pDevExt->IOPortBase = IOPortBase;
155 pDevExt->pVMMDevMemory = NULL;
156 pDevExt->pIrqAckEvents = NULL;
157 pDevExt->WaitList.pHead = NULL;
158 pDevExt->WaitList.pTail = NULL;
159#ifdef VBOX_WITH_HGCM
160 pDevExt->HGCMWaitList.pHead = NULL;
161 pDevExt->HGCMWaitList.pTail = NULL;
162#endif
163 pDevExt->FreeList.pHead = NULL;
164 pDevExt->FreeList.pTail = NULL;
165 pDevExt->f32PendingEvents = 0;
166 pDevExt->u32ClipboardClientId = 0;
167 pDevExt->u32MousePosChangedSeq = 0;
168
169 /*
170 * If there is an MMIO region, validate the version and size.
171 */
172 if (pvMMIOBase)
173 {
174 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
175 Assert(cbMMIO);
176 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
177 && pVMMDev->u32Size >= 32
178 && pVMMDev->u32Size <= cbMMIO)
179 {
180 pDevExt->pVMMDevMemory = pVMMDev;
181 Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
182 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
183 }
184 else /* try to live without it. */
185 LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
186 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
187 }
188
189 /*
190 * Create the wait and session spinlocks.
191 */
192 rc = RTSpinlockCreate(&pDevExt->WaitSpinlock);
193 if (RT_SUCCESS(rc))
194 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
195 if (RT_FAILURE(rc))
196 {
197 Log(("VBoxGuestInitDevExt: failed to create spinlock, rc=%d!\n", rc));
198 if (pDevExt->WaitSpinlock != NIL_RTSPINLOCK)
199 RTSpinlockDestroy(pDevExt->WaitSpinlock);
200 return rc;
201 }
202
203 /*
204 * Initialize the guest library and report the guest info back to VMMDev,
205 * set the interrupt control filter mask, and fixate the guest mappings
206 * made by the VMM.
207 */
208 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
209 if (RT_SUCCESS(rc))
210 {
211 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
212 if (RT_SUCCESS(rc))
213 {
214 rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
215 if (RT_SUCCESS(rc))
216 {
217#ifdef VBOX_WITH_HGCM
218 fEvents |= VMMDEV_EVENT_HGCM;
219#endif
220 rc = vboxGuestInitFilterMask(pDevExt, fEvents);
221 if (RT_SUCCESS(rc))
222 {
223 /*
224 * Disable guest graphics capability by default. The guest specific
225 * graphics driver will re-enable this when it is necessary.
226 */
227 rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
228 if (RT_SUCCESS(rc))
229 {
230 vboxGuestInitFixateGuestMappings(pDevExt);
231 Log(("VBoxGuestInitDevExt: returns success\n"));
232 return VINF_SUCCESS;
233 }
234 }
235 }
236
237 /* failure cleanup */
238 }
239 else
240 Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));
241
242 VbglTerminate();
243 }
244 else
245 Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
246
247 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
248 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
249 return rc; /* (failed) */
250}
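/*
 * Illustrative sketch (not compiled) of the call sequence the documentation
 * above expects from a native port.  The osSpecific* helpers, g_DevExt and the
 * OS type value are placeholders for whatever the OS specific front-end really
 * uses; only VBoxGuestInitDevExt/VBoxGuestDeleteDevExt are from this file.
 */
#if 0
static VBOXGUESTDEVEXT g_DevExt; /* hypothetical global instance */

static int vboxGuestNativeProbe(void)
{
    /* 1. Locate the VMMDev PCI device and query its resources (OS specific). */
    uint16_t IOPortBase = osSpecificGetIoPortBase();  /* hypothetical helper */
    void    *pvMMIOBase = osSpecificMapMmio();        /* hypothetical; may be NULL */
    uint32_t cbMMIO     = osSpecificGetMmioSize();    /* hypothetical; 0 if no MMIO */

    /* 2. Initialize the common device extension. */
    int rc = VBoxGuestInitDevExt(&g_DevExt, IOPortBase, pvMMIOBase, cbMMIO,
                                 VBOXOSTYPE_Linux /* per VBox/ostypes.h */, 0 /* fEvents */);
    if (RT_FAILURE(rc))
        return rc;

    /* 3. Only now hook up the interrupt handler that calls VBoxGuestCommonISR. */
    rc = osSpecificInstallIrqHandler();               /* hypothetical helper */
    if (RT_FAILURE(rc))
        VBoxGuestDeleteDevExt(&g_DevExt);
    return rc;
}
#endif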
251
252
253/**
254 * Deletes all the items in a wait chain.
255 * @param pList The list whose entries should be deleted.
256 */
257static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
258{
259 while (pList->pHead)
260 {
261 int rc2;
262 PVBOXGUESTWAIT pWait = pList->pHead;
263 pList->pHead = pWait->pNext;
264
265 pWait->pNext = NULL;
266 pWait->pPrev = NULL;
267 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
268 pWait->Event = NIL_RTSEMEVENTMULTI;
269 RTMemFree(pWait);
270 }
271 pList->pHead = NULL;
272 pList->pTail = NULL;
273}
274
275
276/**
277 * Destroys the VBoxGuest device extension.
278 *
279 * The native code should call this before the driver is unloaded,
280 * but not on shutdown.
281 *
282 * @param pDevExt The device extension.
283 */
284void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
285{
286 int rc2;
287 Log(("VBoxGuestDeleteDevExt:\n"));
288
289/** @todo tell VMMDev that the guest additions are no longer running (clear all capability masks).
290 * Like calling VBoxGuestSetGuestCapabilities. This wasn't done initially since it was not
291 * relevant for OS/2. On solaris modules can be unloaded, so we should implement it.
292 */
293
294 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
295 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
296
297 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
298#ifdef VBOX_WITH_HGCM
299 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
300#endif
301 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
302
303 VbglTerminate();
304
305 pDevExt->pVMMDevMemory = NULL;
306
307 pDevExt->IOPortBase = 0;
308 pDevExt->pIrqAckEvents = NULL;
309}
310
311
312/**
313 * Creates a VBoxGuest user session.
314 *
315 * The native code calls this when a ring-3 client opens the device.
316 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
317 *
318 * @returns VBox status code.
319 * @param pDevExt The device extension.
320 * @param ppSession Where to store the session on success.
321 */
322int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
323{
324 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
325 if (RT_UNLIKELY(!pSession))
326 {
327 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
328 return VERR_NO_MEMORY;
329 }
330
331 pSession->Process = RTProcSelf();
332 pSession->R0Process = RTR0ProcHandleSelf();
333 pSession->pDevExt = pDevExt;
334
335 *ppSession = pSession;
336 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
337 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
338 return VINF_SUCCESS;
339}
340
341
342/**
343 * Creates a VBoxGuest kernel session.
344 *
345 * The native code calls this when a ring-0 client connects to the device.
346 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
347 *
348 * @returns VBox status code.
349 * @param pDevExt The device extension.
350 * @param ppSession Where to store the session on success.
351 */
352int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
353{
354 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
355 if (RT_UNLIKELY(!pSession))
356 {
357 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
358 return VERR_NO_MEMORY;
359 }
360
361 pSession->Process = NIL_RTPROCESS;
362 pSession->R0Process = NIL_RTR0PROCESS;
363 pSession->pDevExt = pDevExt;
364
365 *ppSession = pSession;
366 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
367 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
368 return VINF_SUCCESS;
369}
370
371
372
373/**
374 * Closes a VBoxGuest session.
375 *
376 * @param pDevExt The device extension.
377 * @param pSession The session to close (and free).
378 */
379void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
380{
381 unsigned i; NOREF(i);
382 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
383 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
384
385#ifdef VBOX_WITH_HGCM
386 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
387 if (pSession->aHGCMClientIds[i])
388 {
389 VBoxGuestHGCMDisconnectInfo Info;
390 Info.result = 0;
391 Info.u32ClientID = pSession->aHGCMClientIds[i];
392 pSession->aHGCMClientIds[i] = 0;
393 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
394 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
395 }
396#endif
397
398 pSession->pDevExt = NULL;
399 pSession->Process = NIL_RTPROCESS;
400 pSession->R0Process = NIL_RTR0PROCESS;
401 RTMemFree(pSession);
402}
403
404
405/**
406 * Links the wait-for-event entry into the tail of the given list.
407 *
408 * @param pList The list to link it into.
409 * @param pWait The wait for event entry to append.
410 */
411DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
412{
413 const PVBOXGUESTWAIT pTail = pList->pTail;
414 pWait->pNext = NULL;
415 pWait->pPrev = pTail;
416 if (pTail)
417 pTail->pNext = pWait;
418 else
419 pList->pHead = pWait;
420 pList->pTail = pWait;
421}
422
423
424/**
425 * Unlinks the wait-for-event entry.
426 *
427 * @param pList The list to unlink it from.
428 * @param pWait The wait for event entry to unlink.
429 */
430DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
431{
432 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
433 const PVBOXGUESTWAIT pNext = pWait->pNext;
434 if (pNext)
435 pNext->pPrev = pPrev;
436 else
437 pList->pTail = pPrev;
438 if (pPrev)
439 pPrev->pNext = pNext;
440 else
441 pList->pHead = pNext;
442}
443
444
445/**
446 * Allocates a wait-for-event entry.
447 *
448 * @returns The wait-for-event entry.
449 * @param pDevExt The device extension.
450 */
451static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt)
452{
453 /*
454 * Allocate it one way or the other.
455 */
456 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
457 if (pWait)
458 {
459 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
460 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
461
462 pWait = pDevExt->FreeList.pTail;
463 if (pWait)
464 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
465
466 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
467 }
468 if (!pWait)
469 {
470 static unsigned s_cErrors = 0;
471 int rc;
472
473 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
474 if (!pWait)
475 {
476 if (s_cErrors++ < 32)
477 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
478 return NULL;
479 }
480
481 rc = RTSemEventMultiCreate(&pWait->Event);
482 if (RT_FAILURE(rc))
483 {
484 if (s_cErrors++ < 32)
485 LogRel(("VBoxGuestWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
486 RTMemFree(pWait);
487 return NULL;
488 }
489 }
490
491 /*
492 * Zero the members just as a precaution.
493 */
494 pWait->pNext = NULL;
495 pWait->pPrev = NULL;
496 pWait->fReqEvents = 0;
497 pWait->fResEvents = 0;
498#ifdef VBOX_WITH_HGCM
499 pWait->pHGCMReq = NULL;
500#endif
501 RTSemEventMultiReset(pWait->Event);
502 return pWait;
503}
504
505
506/**
507 * Frees the wait-for-event entry.
508 * The caller must own the wait spinlock!
509 *
510 * @param pDevExt The device extension.
511 * @param pWait The wait-for-event entry to free.
512 */
513static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
514{
515 pWait->fReqEvents = 0;
516 pWait->fResEvents = 0;
517#ifdef VBOX_WITH_HGCM
518 pWait->pHGCMReq = NULL;
519#endif
520 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
521}
522
523
524/**
525 * Frees the wait-for-event entry.
526 *
527 * @param pDevExt The device extension.
528 * @param pWait The wait-for-event entry to free.
529 */
530static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
531{
532 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
533 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
534 VBoxGuestWaitFreeLocked(pDevExt, pWait);
535 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
536}
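/*
 * Lifecycle sketch (not compiled) for the wait-for-event entries managed by
 * the helpers above: entries are recycled through FreeList rather than being
 * freed, and every list manipulation happens under the wait spinlock.  This
 * mirrors what VBoxGuestCommonIOCtl_WaitEvent below actually does.
 */
#if 0
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt);     /* from FreeList or RTMemAlloc */
    pWait->fReqEvents = VMMDEV_EVENT_MOUSE_POSITION_CHANGED;

    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);         /* the ISR will signal pWait->Event */
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);

    RTSemEventMultiWait(pWait->Event, 1000 /* ms */);

    RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);                /* back onto FreeList */
    RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
#endif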
537
538
539/**
540 * Modifies the guest capabilities.
541 *
542 * Should be called during driver init and termination.
543 *
544 * @returns VBox status code.
545 * @param fOr The Or mask (what to enable).
546 * @param fNot The Not mask (what to disable).
547 */
548int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
549{
550 VMMDevReqGuestCapabilities2 *pReq;
551 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
552 if (RT_FAILURE(rc))
553 {
554 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
555 sizeof(*pReq), sizeof(*pReq), rc));
556 return rc;
557 }
558
559 pReq->u32OrMask = fOr;
560 pReq->u32NotMask = fNot;
561
562 rc = VbglGRPerform(&pReq->header);
563 if (RT_FAILURE(rc))
564 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
565 else if (RT_FAILURE(pReq->header.rc))
566 {
567 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
568 rc = pReq->header.rc;
569 }
570
571 VbglGRFree(&pReq->header);
572 return rc;
573}
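/*
 * Example of the intended usage: VBoxGuestInitDevExt above disables the
 * graphics capability via the Not mask, and a guest graphics driver is
 * expected to turn it back on via the Or mask once it is loaded, e.g.:
 *
 *      rc = VBoxGuestSetGuestCapabilities(VMMDEV_GUEST_SUPPORTS_GRAPHICS, 0);
 */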
574
575
576/**
577 * Implements the fast (no input or output) type of IOCtls.
578 *
579 * This is currently just a placeholder stub inherited from the support driver code.
580 *
581 * @returns VBox status code.
582 * @param iFunction The IOCtl function number.
583 * @param pDevExt The device extension.
584 * @param pSession The session.
585 */
586int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
587{
588 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
589
590 return VERR_NOT_SUPPORTED;
591}
592
593
594
595static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
596{
597 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
598 pInfo->portAddress = pDevExt->IOPortBase;
599 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
600 if (pcbDataReturned)
601 *pcbDataReturned = sizeof(*pInfo);
602 return VINF_SUCCESS;
603}
604
605
606/**
607 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
608 * The caller enters the spinlock; we only leave it when returning VINF_SUCCESS.
609 *
610 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
611 */
612DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
613 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
614{
615 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
616 if (fMatches)
617 {
618 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
619 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, pTmp);
620
621 pInfo->u32EventFlagsOut = fMatches;
622 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
623 if (fReqEvents & ~((uint32_t)1 << iEvent))
624 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
625 else
626 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
627 return VINF_SUCCESS;
628 }
629 return VERR_TIMEOUT;
630}
631
632
633static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned,
634 bool fInterruptible)
635{
636 pInfo->u32EventFlagsOut = 0;
637 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
638 if (pcbDataReturned)
639 *pcbDataReturned = sizeof(*pInfo);
640
641 /*
642 * Copy and verify the input mask.
643 */
644 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
645 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
646 if (RT_UNLIKELY(iEvent < 0))
647 {
648 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
649 return VERR_INVALID_PARAMETER;
650 }
651
652 /*
653 * Check the condition up front, before doing the wait-for-event allocations.
654 */
655 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
656 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
657 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
658 if (rc == VINF_SUCCESS)
659 return rc;
660 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
661
662 if (!pInfo->u32TimeoutIn)
663 {
664 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
665 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
666 return VERR_TIMEOUT;
667 }
668
669 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt);
670 if (!pWait)
671 return VERR_NO_MEMORY;
672 pWait->fReqEvents = fReqEvents;
673
674 /*
675 * We've got the wait entry now, re-enter the spinlock and check for the condition.
676 * If the wait condition is met, return.
677 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
678 */
679 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
680 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
681 if (rc == VINF_SUCCESS)
682 {
683 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
684 return rc;
685 }
686 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
687 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
688
689 if (fInterruptible)
690 rc = RTSemEventMultiWaitNoResume(pWait->Event,
691 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
692 else
693 rc = RTSemEventMultiWait(pWait->Event,
694 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
695
696 /*
697 * There is one special case here and that's when the semaphore is
698 * destroyed upon device driver unload. This shouldn't happen of course,
699 * but in case it does, just get out of here ASAP.
700 */
701 if (rc == VERR_SEM_DESTROYED)
702 return rc;
703
704 /*
705 * Unlink the wait item and dispose of it.
706 */
707 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
708 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
709 const uint32_t fResEvents = pWait->fResEvents;
710 VBoxGuestWaitFreeLocked(pDevExt, pWait);
711 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
712
713 /*
714 * Now deal with the return code.
715 */
716 if (fResEvents)
717 {
718 pInfo->u32EventFlagsOut = fResEvents;
719 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
720 if (fReqEvents & ~((uint32_t)1 << iEvent))
721 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
722 else
723 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
724 rc = VINF_SUCCESS;
725 }
726 else if (rc == VERR_TIMEOUT)
727 {
728 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
729 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
730 }
731 else if (rc == VERR_INTERRUPTED)
732 {
733 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
734 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
735 }
736 else
737 {
738 if (RT_SUCCESS(rc))
739 {
740 static unsigned s_cErrors = 0;
741 if (s_cErrors++ < 32)
742 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
743 rc = VERR_INTERNAL_ERROR;
744 }
745 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
746 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
747 }
748
749 return rc;
750}
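/*
 * Sketch (not compiled) of what a ring-3 caller of the WAITEVENT path looks
 * like.  How the request reaches the driver is OS specific; the ioctl() call
 * below is purely illustrative, while the structure fields and status values
 * are the ones handled above.
 */
#if 0
    VBoxGuestWaitEventInfo Info;
    Info.u32TimeoutIn     = 5000;                                /* ms; UINT32_MAX waits forever */
    Info.u32EventMaskIn   = VMMDEV_EVENT_MOUSE_POSITION_CHANGED; /* events to wait for */
    Info.u32EventFlagsOut = 0;
    Info.u32Result        = 0;
    /* OS specific delivery, e.g. ioctl(fd, VBOXGUEST_IOCTL_WAITEVENT, &Info). */
    if (Info.u32Result == VBOXGUEST_WAITEVENT_OK)
    {
        /* Info.u32EventFlagsOut now holds the event bits that fired. */
    }
#endif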
751
752
753static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
754 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
755{
756 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
757
758 /*
759 * Validate the header and request size.
760 */
761 const VMMDevRequestType enmType = pReqHdr->requestType;
762 const uint32_t cbReq = pReqHdr->size;
763 const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
764 if (cbReq < cbMinSize)
765 {
766 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
767 cbReq, cbMinSize, enmType));
768 return VERR_INVALID_PARAMETER;
769 }
770 if (cbReq > cbData)
771 {
772 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
773 cbData, cbReq, enmType));
774 return VERR_INVALID_PARAMETER;
775 }
776
777 /*
778 * Make a copy of the request in the physical memory heap so
779 * the VBoxGuestLibrary can more easily deal with the request.
780 * (This is really a waste of time since the OS or the OS specific
781 * code has already buffered or locked the input/output buffer, but
782 * it does make things a bit simpler wrt the phys address.)
783 */
784 VMMDevRequestHeader *pReqCopy;
785 int rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
786 if (RT_FAILURE(rc))
787 {
788 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
789 cbReq, cbReq, rc));
790 return rc;
791 }
792 memcpy(pReqCopy, pReqHdr, cbReq);
793
794 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
795 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
796
797 rc = VbglGRPerform(pReqCopy);
798 if ( RT_SUCCESS(rc)
799 && RT_SUCCESS(pReqCopy->rc))
800 {
801 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
802 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
803
804 memcpy(pReqHdr, pReqCopy, cbReq);
805 if (pcbDataReturned)
806 *pcbDataReturned = cbReq;
807 }
808 else if (RT_FAILURE(rc))
809 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
810 else
811 {
812 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
813 rc = pReqCopy->rc;
814 }
815
816 VbglGRFree(pReqCopy);
817 return rc;
818}
819
820
821static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
822{
823 VMMDevCtlGuestFilterMask *pReq;
824 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
825 if (RT_FAILURE(rc))
826 {
827 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
828 sizeof(*pReq), sizeof(*pReq), rc));
829 return rc;
830 }
831
832 pReq->u32OrMask = pInfo->u32OrMask;
833 pReq->u32NotMask = pInfo->u32NotMask;
834
835 rc = VbglGRPerform(&pReq->header);
836 if (RT_FAILURE(rc))
837 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
838 else if (RT_FAILURE(pReq->header.rc))
839 {
840 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
841 rc = pReq->header.rc;
842 }
843
844 VbglGRFree(&pReq->header);
845 return rc;
846}
847
848#ifdef VBOX_WITH_HGCM
849
850AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
851
852/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
853static void VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
854 bool fInterruptible, uint32_t cMillies)
855{
856
857 /*
858 * Check to see if the condition was met by the time we got here.
859 *
860 * We create a simple poll loop here for dealing with out-of-memory
861 * conditions since the caller isn't necessarily able to deal with
862 * us returning too early.
863 */
864 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
865 PVBOXGUESTWAIT pWait;
866 for (;;)
867 {
868 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
869 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
870 {
871 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
872 return;
873 }
874 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
875
876 pWait = VBoxGuestWaitAlloc(pDevExt);
877 if (pWait)
878 break;
879 if (fInterruptible)
880 return;
881 RTThreadSleep(1);
882 }
883 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
884 pWait->pHGCMReq = pHdr;
885
886 /*
887 * Re-enter the spinlock and re-check for the condition.
888 * If the condition is met, return.
889 * Otherwise link us into the HGCM wait list and go to sleep.
890 */
891 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
892 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
893 {
894 VBoxGuestWaitFreeLocked(pDevExt, pWait);
895 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
896 return;
897 }
898 VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
899 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
900
901 int rc;
902 if (fInterruptible)
903 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
904 else
905 rc = RTSemEventMultiWait(pWait->Event, cMillies);
906
907 /*
908 * This shouldn't ever return failure...
909 * Unlink, free and return.
910 */
911 if (rc == VERR_SEM_DESTROYED)
912 return;
913 if (RT_FAILURE(rc))
914 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
915
916 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
917 VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
918 VBoxGuestWaitFreeLocked(pDevExt, pWait);
919 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
920}
921
922
923/**
924 * This is a callback for dealing with async waits.
925 *
926 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
927 */
928static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
929{
930 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
931 LogFunc(("requestType=%d\n", pHdr->header.requestType));
932 VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
933 pDevExt,
934 false /* fInterruptible */,
935 u32User /* cMillies */);
936}
937
938
939/**
940 * This is a callback for dealing with interruptible async waits.
941 *
942 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
943 */
944static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
945 void *pvUser, uint32_t u32User)
946{
947 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
948 LogFunc(("requestType=%d\n", pHdr->header.requestType));
949 VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
950 pDevExt,
951 true /* fInterruptible */,
952 u32User /* cMillies */ );
953}
954
955
956static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
957 size_t *pcbDataReturned)
958{
959 /*
960 * The VbglHGCMConnect call will invoke the callback if the HGCM
961 * call is performed in an ASYNC fashion. The function is not able
962 * to deal with cancelled requests.
963 */
964 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
965 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
966 ? pInfo->Loc.u.host.achName : "<not local host>"));
967
968 int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
969 if (RT_SUCCESS(rc))
970 {
971 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
972 pInfo->u32ClientID, pInfo->result, rc));
973 if (RT_SUCCESS(pInfo->result))
974 {
975 /*
976 * Append the client id to the client id table.
977 * If the table has somehow become filled up, we'll disconnect the session.
978 */
979 unsigned i;
980 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
981 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
982 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
983 if (!pSession->aHGCMClientIds[i])
984 {
985 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
986 break;
987 }
988 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
989 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
990 {
991 static unsigned s_cErrors = 0;
992 if (s_cErrors++ < 32)
993 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
994
995 VBoxGuestHGCMDisconnectInfo Info;
996 Info.result = 0;
997 Info.u32ClientID = pInfo->u32ClientID;
998 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
999 return VERR_TOO_MANY_OPEN_FILES;
1000 }
1001 }
1002 if (pcbDataReturned)
1003 *pcbDataReturned = sizeof(*pInfo);
1004 }
1005 return rc;
1006}
1007
1008
1009static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1010 size_t *pcbDataReturned)
1011{
1012 /*
1013 * Validate the client id and invalidate its entry while we're in the call.
1014 */
1015 const uint32_t u32ClientId = pInfo->u32ClientID;
1016 unsigned i;
1017 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1018 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1019 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1020 if (pSession->aHGCMClientIds[i] == u32ClientId)
1021 {
1022 pSession->aHGCMClientIds[i] = UINT32_MAX;
1023 break;
1024 }
1025 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1026 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1027 {
1028 static unsigned s_cErrors = 0;
1029 if (s_cErrors++ < 32)
1030 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: Invalid handle. u32Client=%RX32\n", u32ClientId));
1031 return VERR_INVALID_HANDLE;
1032 }
1033
1034 /*
1035 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
1036 * call is performed in an ASYNC fashion. The function is not able
1037 * to deal with cancelled requests.
1038 */
1039 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1040 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1041 if (RT_SUCCESS(rc))
1042 {
1043 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1044 if (pcbDataReturned)
1045 *pcbDataReturned = sizeof(*pInfo);
1046 }
1047
1048 /* Update the client id array according to the result. */
1049 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1050 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1051 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1052 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1053
1054 return rc;
1055}
1056
1057
1058static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1059 PVBOXGUESTSESSION pSession,
1060 VBoxGuestHGCMCallInfo *pInfo,
1061 uint32_t cMillies, bool fInterruptible, bool f32bit,
1062 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1063{
1064 /*
1065 * Some more validations.
1066 */
1067 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1068 {
1069 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1070 return VERR_INVALID_PARAMETER;
1071 }
1072 size_t cbActual = cbExtra + sizeof(*pInfo);
1073#ifdef RT_ARCH_AMD64
1074 if (f32bit)
1075 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1076 else
1077#endif
1078 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1079 if (cbData < cbActual)
1080 {
1081 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1082 cbData, cbData, cbActual, cbActual));
1083 return VERR_INVALID_PARAMETER;
1084 }
1085
1086 /*
1087 * Validate the client id.
1088 */
1089 const uint32_t u32ClientId = pInfo->u32ClientID;
1090 unsigned i;
1091 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1092 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1093 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1094 if (pSession->aHGCMClientIds[i] == u32ClientId)
1095 break;
1096 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1097 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1098 {
1099 static unsigned s_cErrors = 0;
1100 if (s_cErrors++ < 32)
1101 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1102 return VERR_INVALID_HANDLE;
1103 }
1104
1105 /*
1106 * The VbglHGCMCall call will invoke the callback if the HGCM
1107 * call is performed in an ASYNC fashion. This function can
1108 * deal with cancelled requests, so we let user mode requests
1109 * be interruptible (should add a flag for this later I guess).
1110 */
1111 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1112 int rc;
1113 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1114#ifdef RT_ARCH_AMD64
1115 if (f32bit)
1116 {
1117 if (fInterruptible)
1118 rc = VbglR0HGCMInternalCall32(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1119 else
1120 rc = VbglR0HGCMInternalCall32(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1121 }
1122 else
1123#endif
1124 {
1125 if (fInterruptible)
1126 rc = VbglR0HGCMInternalCall(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1127 else
1128 rc = VbglR0HGCMInternalCall(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1129 }
1130 if (RT_SUCCESS(rc))
1131 {
1132 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1133 if (pcbDataReturned)
1134 *pcbDataReturned = cbActual;
1135 }
1136 else Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1137 return rc;
1138}
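/*
 * Buffer sizing sketch (not compiled) for callers of the HGCM_CALL path: the
 * VBoxGuestHGCMCallInfo header is followed directly by cParms
 * HGCMFunctionParameter entries, which is what the cbActual check above
 * recomputes (with cbExtra == 0 for the plain HGCM_CALL request).  The client
 * id is assumed to come from an earlier HGCM_CONNECT; filling in the function
 * number and the parameter array is left to the VBoxGuest/VMMDev headers.
 */
#if 0
    uint32_t cParms = 3;
    size_t   cbBuf  = sizeof(VBoxGuestHGCMCallInfo) + cParms * sizeof(HGCMFunctionParameter);
    VBoxGuestHGCMCallInfo *pCall = (VBoxGuestHGCMCallInfo *)RTMemAllocZ(cbBuf);
    if (pCall)
    {
        pCall->u32ClientID = u32ClientId;   /* id returned by a previous HGCM_CONNECT */
        pCall->cParms      = cParms;
        /* ... function number and parameters set up per VBoxGuest.h ... */
    }
#endif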
1139
1140
1141/**
1142 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1143 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1144 */
1145static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1146{
1147 int rc;
1148 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1149
1150
1151 /*
1152 * If there is an old client, try disconnect it first.
1153 */
1154 if (pDevExt->u32ClipboardClientId != 0)
1155 {
1156 VBoxGuestHGCMDisconnectInfo Info;
1157 Info.result = VERR_WRONG_ORDER;
1158 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1159 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1160 if (RT_FAILURE(rc))
1161 {
1162 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1163 return rc;
1164 }
1165 if (RT_FAILURE((int32_t)Info.result))
1166 {
1167 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", Info.result));
1168 return Info.result;
1169 }
1170 pDevExt->u32ClipboardClientId = 0;
1171 }
1172
1173 /*
1174 * Try connect.
1175 */
1176 VBoxGuestHGCMConnectInfo Info;
1177 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1178 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1179 Info.u32ClientID = 0;
1180 Info.result = VERR_WRONG_ORDER;
1181
1182 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1183 if (RT_FAILURE(rc))
1184 {
1185 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnect -> rc=%Rrc\n", rc));
1186 return rc;
1187 }
1188 if (RT_FAILURE(Info.result))
1189 {
1190 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnect -> Info.result=%Rrc\n", Info.result));
1191 return Info.result;
1192 }
1193
1194 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1195
1196 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1197 *pu32ClientId = Info.u32ClientID;
1198 if (pcbDataReturned)
1199 *pcbDataReturned = sizeof(uint32_t);
1200
1201 return VINF_SUCCESS;
1202}
1203
1204#endif /* VBOX_WITH_HGCM */
1205
1206/**
1207 * Guest backdoor logging.
1208 *
1209 * @returns VBox status code.
1210 *
1211 * @param pch The log message (need not be NULL terminated).
1212 * @param cbData Size of the buffer.
1213 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1214 */
1215static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1216{
1217 Log(("%.*s", cbData, pch));
1218 if (pcbDataReturned)
1219 *pcbDataReturned = 0;
1220 return VINF_SUCCESS;
1221}
1222
1223
1224/**
1225 * Common IOCtl for user to kernel and kernel to kernel communication.
1226 *
1227 * This function only does the basic validation and then invokes
1228 * worker functions that take care of each specific function.
1229 *
1230 * @returns VBox status code.
1231 *
1232 * @param iFunction The requested function.
1233 * @param pDevExt The device extension.
1234 * @param pSession The client session.
1235 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1236 * @param cbData The max size of the data buffer.
1237 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1238 */
1239int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1240 void *pvData, size_t cbData, size_t *pcbDataReturned)
1241{
1242 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1243 iFunction, pDevExt, pSession, pvData, cbData));
1244
1245 /*
1246 * Define some helper macros to simplify validation.
1247 */
1248#define CHECKRET_RING0(mnemonic) \
1249 do { \
1250 if (pSession->R0Process != NIL_RTR0PROCESS) \
1251 { \
1252 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1253 pSession->Process, (uintptr_t)pSession->R0Process)); \
1254 return VERR_PERMISSION_DENIED; \
1255 } \
1256 } while (0)
1257#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1258 do { \
1259 if (cbData < (cbMin)) \
1260 { \
1261 Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1262 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1263 return VERR_BUFFER_OVERFLOW; \
1264 } \
1265 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1266 { \
1267 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1268 return VERR_INVALID_POINTER; \
1269 } \
1270 } while (0)
1271
1272
1273 /*
1274 * Deal with variably sized requests first.
1275 */
1276 int rc = VINF_SUCCESS;
1277 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
1278 {
1279 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1280 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1281 }
1282#ifdef VBOX_WITH_HGCM
1283 /*
1284 * These ones are a bit tricky.
1285 */
1286 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
1287 {
1288 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1289 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1290 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1291 fInterruptible, false /*f32bit*/,
1292 0, cbData, pcbDataReturned);
1293 }
1294 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
1295 {
1296 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1297 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1298 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1299 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1300 false /*f32bit*/,
1301 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1302 }
1303# ifdef RT_ARCH_AMD64
1304 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
1305 {
1306 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1307 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1308 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1309 fInterruptible, true /*f32bit*/,
1310 0, cbData, pcbDataReturned);
1311 }
1312 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
1313 {
1314 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1315 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1316 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1317 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1318 true /*f32bit*/,
1319 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1320 }
1321# endif
1322#endif /* VBOX_WITH_HGCM */
1323 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
1324 {
1325 CHECKRET_MIN_SIZE("LOG", 1);
1326 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
1327 }
1328 else
1329 {
1330 switch (iFunction)
1331 {
1332 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1333 CHECKRET_RING0("GETVMMDEVPORT");
1334 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1335 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1336 break;
1337
1338 case VBOXGUEST_IOCTL_WAITEVENT:
1339 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1340 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, (VBoxGuestWaitEventInfo *)pvData, pcbDataReturned,
1341 pSession->R0Process != NIL_RTR0PROCESS);
1342 break;
1343
1344 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1345 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1346 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1347 break;
1348
1349#ifdef VBOX_WITH_HGCM
1350 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1351# ifdef RT_ARCH_AMD64
1352 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
1353# endif
1354 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1355 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1356 break;
1357
1358 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1359# ifdef RT_ARCH_AMD64
1360 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
1361# endif
1362 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1363 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1364 break;
1365
1366 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1367 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1368 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1369 break;
1370#endif /* VBOX_WITH_HGCM */
1371
1372 default:
1373 {
1374 Log(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
1375 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
1376 rc = VERR_NOT_SUPPORTED;
1377 break;
1378 }
1379 }
1380 }
1381
1382 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1383 return rc;
1384}
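/*
 * Sketch (not compiled) of how an OS specific ioctl entry point typically
 * funnels into the common code above.  The wrapper name, g_DevExt and the way
 * the request buffer reaches ring-0 are hypothetical; only the call into
 * VBoxGuestCommonIOCtl mirrors this file.
 */
#if 0
static int vboxGuestNativeIOCtl(unsigned iFunction, PVBOXGUESTSESSION pSession,
                                void *pvData, size_t cbData)
{
    size_t cbReturned = 0;
    int rc = VBoxGuestCommonIOCtl(iFunction, &g_DevExt, pSession, pvData, cbData, &cbReturned);
    /* The native layer copies cbReturned bytes of pvData back to the caller and
       translates rc into the OS specific error convention. */
    return rc;
}
#endif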
1385
1386
1387
1388/**
1389 * Common interrupt service routine.
1390 *
1391 * This deals with events and with waking up threads waiting for those events.
1392 *
1393 * @returns true if it was our interrupt, false if it wasn't.
1394 * @param pDevExt The VBoxGuest device extension.
1395 */
1396bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
1397{
1398 /*
1399 * Now we have to find out whether it was our IRQ. Read the event mask
1400 * from our device to see if there are any pending events.
1401 */
1402 bool fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
1403 if (fOurIrq)
1404 {
1405 /* Acknowledge events. */
1406 VMMDevEvents *pReq = pDevExt->pIrqAckEvents;
1407 int rc = VbglGRPerform(&pReq->header);
1408 if ( RT_SUCCESS(rc)
1409 && RT_SUCCESS(pReq->header.rc))
1410 {
1411 uint32_t fEvents = pReq->events;
1412 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
1413
1414 /*
1415 * Enter the spinlock and examine the waiting threads.
1416 */
1417 int rc2 = 0;
1418 PVBOXGUESTWAIT pWait;
1419 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1420 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
1421
1422 /** @todo This looks wrong: Seems like VMMDEV_EVENT_HGCM will always be set in
1423 * f32PendingEvents... */
1424#ifdef VBOX_WITH_HGCM
1425 /* The HGCM event/list is kind of different in that we evaluate all entries. */
1426 if (fEvents & VMMDEV_EVENT_HGCM)
1427 for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
1428 if ( !pWait->fResEvents
1429 && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
1430 {
1431 pWait->fResEvents = VMMDEV_EVENT_HGCM;
1432 rc2 |= RTSemEventMultiSignal(pWait->Event);
1433 }
1434#endif
1435
1436 /* VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for. */
1437#if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS)
1438 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
1439 {
1440 pDevExt->u32MousePosChangedSeq++;
1441 VBoxGuestNativeISRMousePollEvent(pDevExt);
1442 }
1443#endif
1444 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1445
1446 /* Normal FIFO evaluation. */
1447 fEvents |= pDevExt->f32PendingEvents;
1448 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1449 if (!pWait->fResEvents)
1450 {
1451 pWait->fResEvents = pWait->fReqEvents & fEvents;
1452 fEvents &= ~pWait->fResEvents;
1453 rc2 |= RTSemEventMultiSignal(pWait->Event);
1454 if (!fEvents)
1455 break;
1456 }
1457
1458 ASMAtomicXchgU32(&pDevExt->f32PendingEvents, fEvents);
1459 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
1460 Assert(rc2 == 0);
1461 }
1462 else /* something is seriously wrong... */
1463 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d, header rc=%d (events=%#x)!!\n",
1464 rc, pReq->header.rc, pReq->events));
1465 }
1466 else
1467 LogFlow(("VBoxGuestCommonISR: not ours\n"));
1468
1469 return fOurIrq;
1470}
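/*
 * Sketch (not compiled) of the native interrupt handler shape implied by the
 * documentation above: the OS specific stub forwards to VBoxGuestCommonISR and
 * uses the return value to tell the OS whether the (possibly shared) interrupt
 * was ours.  The OS_IRQ_* constants are hypothetical placeholders.
 */
#if 0
static int vboxGuestNativeISR(void *pvDevExt)
{
    bool fOurIrq = VBoxGuestCommonISR((PVBOXGUESTDEVEXT)pvDevExt);
    return fOurIrq ? OS_IRQ_HANDLED : OS_IRQ_NOT_OURS;
}
#endif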
1471