VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 21450

Last change on this file since 21450 was 21450, checked in by vboxsync, 16 years ago

VBoxGuest.cpp: Fixed bug in the wake-for-event wakeup code: missing event matching, caused waiters to wake up when they shouldn't.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 53.0 KB
Line 
1/* $Id: VBoxGuest.cpp 21450 2009-07-09 16:33:54Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_WITH_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so use a small stack to perform this operation.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.
 */
static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /* Intentionally a no-op for now: the reservation scheme is not implemented yet. */
    /** @todo implement this using RTR0MemObjReserveKernel() (it needs to be implemented everywhere too). */
    return VINF_SUCCESS;
}
69
70
71/**
72 * Initializes the interrupt filter mask.
73 *
74 * This will ASSUME that we're the ones in carge over the mask, so
75 * we'll simply clear all bits we don't set.
76 *
77 * @returns VBox status code (ignored).
78 * @param pDevExt The device extension.
79 * @param fMask The new mask.
80 */
81static int vboxGuestInitFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
82{
83 VMMDevCtlGuestFilterMask *pReq;
84 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
85 if (RT_SUCCESS(rc))
86 {
87 pReq->u32OrMask = fMask;
88 pReq->u32NotMask = ~fMask; /* It's an AND mask. */
89 rc = VbglGRPerform(&pReq->header);
90 if ( RT_FAILURE(rc)
91 || RT_FAILURE(pReq->header.rc))
92 LogRel(("vboxGuestInitCtlFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
93 rc, pReq->header.rc));
94 VbglGRFree(&pReq->header);
95 }
96 return rc;
97}
98
99
100/**
101 * Report guest information to the VMMDev.
102 *
103 * @returns VBox status code.
104 * @param pDevExt The device extension.
105 * @param enmOSType The OS type to report.
106 */
107static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
108{
109 VMMDevReportGuestInfo *pReq;
110 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
111 if (RT_SUCCESS(rc))
112 {
113 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
114 pReq->guestInfo.osType = enmOSType;
115 rc = VbglGRPerform(&pReq->header);
116 if ( RT_FAILURE(rc)
117 || RT_FAILURE(pReq->header.rc))
118 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
119 rc, pReq->header.rc));
120 VbglGRFree(&pReq->header);
121 }
122 return rc;
123}
124
125
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents: the HGCM event must always be enabled when HGCM
     * support is compiled in, since the async wait code depends on it.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data. Everything gets a deterministic value here so the
     * failure paths below (and VBoxGuestDeleteDevExt) see a consistent state.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->u32MousePosChangedSeq = 0;

    /*
     * If there is an MMIO region validate the version and size before using it.
     * A bogus region is not fatal; we carry on without it.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        /* NOTE(review): this check assumes RTSpinlockCreate sets the handle to
           NIL_RTSPINLOCK on failure (or that it was NIL beforehand) — confirm. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Preallocate the IRQ acknowledgement request so the ISR never has to allocate. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestInitFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt); /* return code deliberately ignored */
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }
                }
            }

            /* failure cleanup */
        }
        else
            Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Common failure path: both spinlocks were created above, tear them down. */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
261
262
263/**
264 * Deletes all the items in a wait chain.
265 * @param pWait The head of the chain.
266 */
267static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
268{
269 while (pList->pHead)
270 {
271 int rc2;
272 PVBOXGUESTWAIT pWait = pList->pHead;
273 pList->pHead = pWait->pNext;
274
275 pWait->pNext = NULL;
276 pWait->pPrev = NULL;
277 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
278 pWait->Event = NIL_RTSEMEVENTMULTI;
279 RTMemFree(pWait);
280 }
281 pList->pHead = NULL;
282 pList->pTail = NULL;
283}
284
285
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));

/** @todo tell VMMDev that the guest additions are no longer running (clear all capability masks).
 * Like calling VBoxGuestSetGuestCapabilities. This wasn't done initially since it was not
 * relevant for OS/2. On solaris modules can be unloaded, so we should implement it.
 */

    /* Tear down the spinlocks created by VBoxGuestInitDevExt. */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

    /* Free all wait entries, including the recycled ones on the free list. */
    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    /* Scrub the fields so stale pointers cannot be used after this point. */
    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
320
321
322/**
323 * Creates a VBoxGuest user session.
324 *
325 * The native code calls this when a ring-3 client opens the device.
326 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
327 *
328 * @returns VBox status code.
329 * @param pDevExt The device extension.
330 * @param ppSession Where to store the session on success.
331 */
332int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
333{
334 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
335 if (RT_UNLIKELY(!pSession))
336 {
337 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
338 return VERR_NO_MEMORY;
339 }
340
341 pSession->Process = RTProcSelf();
342 pSession->R0Process = RTR0ProcHandleSelf();
343 pSession->pDevExt = pDevExt;
344
345 *ppSession = pSession;
346 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
347 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
348 return VINF_SUCCESS;
349}
350
351
352/**
353 * Creates a VBoxGuest kernel session.
354 *
355 * The native code calls this when a ring-0 client connects to the device.
356 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
357 *
358 * @returns VBox status code.
359 * @param pDevExt The device extension.
360 * @param ppSession Where to store the session on success.
361 */
362int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
363{
364 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
365 if (RT_UNLIKELY(!pSession))
366 {
367 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
368 return VERR_NO_MEMORY;
369 }
370
371 pSession->Process = NIL_RTPROCESS;
372 pSession->R0Process = NIL_RTR0PROCESS;
373 pSession->pDevExt = pDevExt;
374
375 *ppSession = pSession;
376 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
377 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
378 return VINF_SUCCESS;
379}
380
381
382
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM clients the session still owns, then scrubs and
 * frees the session structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect every client id still registered with this session; the slot
       is cleared before the (blocking) disconnect call is made. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Scrub the structure before freeing it as a use-after-free precaution. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    RTMemFree(pSession);
}
413
414
415/**
416 * Links the wait-for-event entry into the tail of the given list.
417 *
418 * @param pList The list to link it into.
419 * @param pWait The wait for event entry to append.
420 */
421DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
422{
423 const PVBOXGUESTWAIT pTail = pList->pTail;
424 pWait->pNext = NULL;
425 pWait->pPrev = pTail;
426 if (pTail)
427 pTail->pNext = pWait;
428 else
429 pList->pHead = pWait;
430 pList->pTail = pWait;
431}
432
433
434/**
435 * Unlinks the wait-for-event entry.
436 *
437 * @param pList The list to unlink it from.
438 * @param pWait The wait for event entry to unlink.
439 */
440DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
441{
442 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
443 const PVBOXGUESTWAIT pNext = pWait->pNext;
444 if (pNext)
445 pNext->pPrev = pPrev;
446 else
447 pList->pTail = pPrev;
448 if (pPrev)
449 pPrev->pNext = pNext;
450 else
451 pList->pHead = pNext;
452}
453
454
/**
 * Allocates a wait-for-event entry.
 *
 * Tries to recycle an entry from the free list first (under the event
 * spinlock); only allocates a fresh entry and semaphore if the free list
 * is empty.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt     The device extension.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek first to avoid taking the spinlock when the list looks empty;
       the result is re-checked under the lock before use. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        /* Re-read under the lock; another thread may have emptied the list. */
        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Rate-limit the release-log noise from repeated allocation failures. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as an precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
514
515
516/**
517 * Frees the wait-for-event entry.
518 * The caller must own the wait spinlock!
519 *
520 * @param pDevExt The device extension.
521 * @param pWait The wait-for-event entry to free.
522 */
523static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
524{
525 pWait->fReqEvents = 0;
526 pWait->fResEvents = 0;
527#ifdef VBOX_WITH_HGCM
528 pWait->pHGCMReq = NULL;
529#endif
530 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
531}
532
533
534/**
535 * Frees the wait-for-event entry.
536 *
537 * @param pDevExt The device extension.
538 * @param pWait The wait-for-event entry to free.
539 */
540static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
541{
542 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
543 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
544 VBoxGuestWaitFreeLocked(pDevExt, pWait);
545 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
546}
547
548
549/**
550 * Modifies the guest capabilities.
551 *
552 * Should be called during driver init and termination.
553 *
554 * @returns VBox status code.
555 * @param fOr The Or mask (what to enable).
556 * @param fNot The Not mask (what to disable).
557 */
558int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
559{
560 VMMDevReqGuestCapabilities2 *pReq;
561 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
562 if (RT_FAILURE(rc))
563 {
564 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
565 sizeof(*pReq), sizeof(*pReq), rc));
566 return rc;
567 }
568
569 pReq->u32OrMask = fOr;
570 pReq->u32NotMask = fNot;
571
572 rc = VbglGRPerform(&pReq->header);
573 if (RT_FAILURE(rc))
574 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
575 else if (RT_FAILURE(pReq->header.rc))
576 {
577 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
578 rc = pReq->header.rc;
579 }
580
581 VbglGRFree(&pReq->header);
582 return rc;
583}
584
585
586/**
587 * Implements the fast (no input or output) type of IOCtls.
588 *
589 * This is currently just a placeholder stub inherited from the support driver code.
590 *
591 * @returns VBox status code.
592 * @param iFunction The IOCtl function number.
593 * @param pDevExt The device extension.
594 * @param pSession The session.
595 */
596int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
597{
598 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
599
600 return VERR_NOT_SUPPORTED;
601}
602
603
604
605static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
606{
607 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
608 pInfo->portAddress = pDevExt->IOPortBase;
609 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
610 if (pcbDataReturned)
611 *pcbDataReturned = sizeof(*pInfo);
612 return VINF_SUCCESS;
613}
614
615
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 * The caller enters the spinlock, we may or may not leave it.
 *
 * Checks whether any of the requested events are already pending; if so,
 * consumes them, releases the spinlock and fills in the result.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately;
 *          VERR_TIMEOUT if nothing matched and the spinlock is STILL HELD.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events before dropping the lock. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Log the event index too when only a single event bit was requested. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
641
642
/**
 * Handles the WAITEVENT IOCtl: waits for any of the requested event bits to
 * become pending, or until the timeout expires.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pInfo               The wait request; mask, timeout in, results out.
 * @param   pcbDataReturned     Optional; receives the size written.
 * @param   fInterruptible      Whether the wait can be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned,
                                          bool fInterruptible)
{
    /* Initialize the output up front so every exit path reports something sane. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;   /* -1 when the mask is empty */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     * (WaitEventCheckCondition releases the spinlock itself on VINF_SUCCESS.)
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll-only: report timeout without sleeping. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* UINT32_MAX is the wire encoding for an indefinite wait. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents;   /* snapshot before recycling the entry */
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code. Delivered events win over any wait
     * failure (e.g. a timeout that raced with the ISR signalling us).
     */
    if (fResEvents)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else if (rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else
    {
        /* Success without any delivered events is unexpected; flag it. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
761
762
/**
 * Handles the VMMREQUEST IOCtl: forwards a generic VMMDev request from the
 * caller to the host and copies the result back.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The caller's request buffer (header + payload).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Optional; receives the size copied back.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    int rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        /* Async HGCM execution must not reach this generic path. */
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request; propagate its status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
829
830
831static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
832{
833 VMMDevCtlGuestFilterMask *pReq;
834 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
835 if (RT_FAILURE(rc))
836 {
837 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
838 sizeof(*pReq), sizeof(*pReq), rc));
839 return rc;
840 }
841
842 pReq->u32OrMask = pInfo->u32OrMask;
843 pReq->u32NotMask = pInfo->u32NotMask;
844 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
845 rc = VbglGRPerform(&pReq->header);
846 if (RT_FAILURE(rc))
847 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
848 else if (RT_FAILURE(pReq->header.rc))
849 {
850 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
851 rc = pReq->header.rc;
852 }
853
854 VbglGRFree(&pReq->header);
855 return rc;
856}
857
858#ifdef VBOX_WITH_HGCM
859
860AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
861
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks until the HGCM request is marked done (VBOX_HGCM_REQ_DONE) or the
 * wait times out / is interrupted. Uses the same wait-entry machinery as the
 * WAITEVENT IOCtl, hooked onto the HGCM wait list.
 *
 * @param   pHdr            The (volatile) HGCM request header to watch.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait can be interrupted.
 * @param   cMillies        Wait timeout in milliseconds.
 */
static void VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                 bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt);
        if (pWait)
            break;
        if (fInterruptible)
            return;  /* interruptible callers can cope with an early return */
        RTThreadSleep(1);  /* back off briefly, then retry the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);

    /*
     * This shouldn't ever return failure...
     * Unlink, free and return.
     */
    if (rc == VERR_SEM_DESTROYED)
        return;  /* driver unload; entry ownership is gone, bail out */
    if (RT_FAILURE(rc))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
}
931
932
933/**
934 * This is a callback for dealing with async waits.
935 *
936 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
937 */
938static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
939{
940 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
941 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
942 VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
943 pDevExt,
944 false /* fInterruptible */,
945 u32User /* cMillies */);
946}
947
948
949/**
950 * This is a callback for dealing with async waits with a timeout.
951 *
952 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
953 */
954static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
955 void *pvUser, uint32_t u32User)
956{
957 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
958 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
959 VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
960 pDevExt,
961 true /* fInterruptible */,
962 u32User /* cMillies */ );
963}
964
965
/**
 * Handles the HGCM_CONNECT IOCtl: connects to an HGCM service and records the
 * resulting client id in the session's client id table.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session; owns the client id on success.
 * @param   pInfo               Connect parameters in, client id / result out.
 * @param   pcbDataReturned     Optional; receives the size written.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect so the client id doesn't leak. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1017
1018
1019static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1020 size_t *pcbDataReturned)
1021{
1022 /*
1023 * Validate the client id and invalidate its entry while we're in the call.
1024 */
1025 const uint32_t u32ClientId = pInfo->u32ClientID;
1026 unsigned i;
1027 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1028 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1029 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1030 if (pSession->aHGCMClientIds[i] == u32ClientId)
1031 {
1032 pSession->aHGCMClientIds[i] = UINT32_MAX;
1033 break;
1034 }
1035 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1036 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1037 {
1038 static unsigned s_cErrors = 0;
1039 if (s_cErrors++ > 32)
1040 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1041 return VERR_INVALID_HANDLE;
1042 }
1043
1044 /*
1045 * The VbglHGCMConnect call will invoke the callback if the HGCM
1046 * call is performed in an ASYNC fashion. The function is not able
1047 * to deal with cancelled requests.
1048 */
1049 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1050 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1051 if (RT_SUCCESS(rc))
1052 {
1053 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1054 if (pcbDataReturned)
1055 *pcbDataReturned = sizeof(*pInfo);
1056 }
1057
1058 /* Update the client id array according to the result. */
1059 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1060 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1061 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1062 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1063
1064 return rc;
1065}
1066
1067
1068static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1069 PVBOXGUESTSESSION pSession,
1070 VBoxGuestHGCMCallInfo *pInfo,
1071 uint32_t cMillies, bool fInterruptible, bool f32bit,
1072 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1073{
1074 /*
1075 * Some more validations.
1076 */
1077 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1078 {
1079 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1080 return VERR_INVALID_PARAMETER;
1081 }
1082 size_t cbActual = cbExtra + sizeof(*pInfo);
1083#ifdef RT_ARCH_AMD64
1084 if (f32bit)
1085 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1086 else
1087#endif
1088 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1089 if (cbData < cbActual)
1090 {
1091 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1092 cbData, cbActual));
1093 return VERR_INVALID_PARAMETER;
1094 }
1095
1096 /*
1097 * Validate the client id.
1098 */
1099 const uint32_t u32ClientId = pInfo->u32ClientID;
1100 unsigned i;
1101 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1102 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1103 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1104 if (pSession->aHGCMClientIds[i] == u32ClientId)
1105 break;
1106 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1107 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1108 {
1109 static unsigned s_cErrors = 0;
1110 if (s_cErrors++ > 32)
1111 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1112 return VERR_INVALID_HANDLE;
1113 }
1114
1115 /*
1116 * The VbglHGCMCall call will invoke the callback if the HGCM
1117 * call is performed in an ASYNC fashion. This function can
1118 * deal with cancelled requests, so we let user more requests
1119 * be interruptible (should add a flag for this later I guess).
1120 */
1121 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1122 int rc;
1123 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1124#ifdef RT_ARCH_AMD64
1125 if (f32bit)
1126 {
1127 if (fInterruptible)
1128 rc = VbglR0HGCMInternalCall32(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1129 else
1130 rc = VbglR0HGCMInternalCall32(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1131 }
1132 else
1133#endif
1134 {
1135 if (fInterruptible)
1136 rc = VbglR0HGCMInternalCall(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1137 else
1138 rc = VbglR0HGCMInternalCall(pInfo, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1139 }
1140 if (RT_SUCCESS(rc))
1141 {
1142 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1143 if (pcbDataReturned)
1144 *pcbDataReturned = cbActual;
1145 }
1146 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1147 return rc;
1148}
1149
1150
1151/**
1152 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1153 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1154 */
1155static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1156{
1157 int rc;
1158 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1159
1160
1161 /*
1162 * If there is an old client, try disconnect it first.
1163 */
1164 if (pDevExt->u32ClipboardClientId != 0)
1165 {
1166 VBoxGuestHGCMDisconnectInfo Info;
1167 Info.result = VERR_WRONG_ORDER;
1168 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1169 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1170 if (RT_SUCCESS(rc))
1171 {
1172 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1173 return rc;
1174 }
1175 if (RT_FAILURE((int32_t)Info.result))
1176 {
1177 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1178 return Info.result;
1179 }
1180 pDevExt->u32ClipboardClientId = 0;
1181 }
1182
1183 /*
1184 * Try connect.
1185 */
1186 VBoxGuestHGCMConnectInfo Info;
1187 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1188 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1189 Info.u32ClientID = 0;
1190 Info.result = VERR_WRONG_ORDER;
1191
1192 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1193 if (RT_FAILURE(rc))
1194 {
1195 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1196 return rc;
1197 }
1198 if (RT_FAILURE(Info.result))
1199 {
1200 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1201 return rc;
1202 }
1203
1204 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1205
1206 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1207 *pu32ClientId = Info.u32ClientID;
1208 if (pcbDataReturned)
1209 *pcbDataReturned = sizeof(uint32_t);
1210
1211 return VINF_SUCCESS;
1212}
1213
1214#endif /* VBOX_WITH_HGCM */
1215
1216/**
1217 * Guest backdoor logging.
1218 *
1219 * @returns VBox status code.
1220 *
1221 * @param pch The log message (need not be NULL terminated).
1222 * @param cbData Size of the buffer.
1223 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1224 */
1225static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1226{
1227 Log(("%.*s", cbData, pch));
1228 if (pcbDataReturned)
1229 *pcbDataReturned = 0;
1230 return VINF_SUCCESS;
1231}
1232
1233
/**
 * Common IOCtl for user to kernel and kernel to kernel communication.
 *
 * This function only does the basic validation and then invokes
 * worker functions that takes care of each specific function.
 *
 * @returns VBox status code.
 *
 * @param   iFunction           The requested function.
 * @param   pDevExt             The device extension.
 * @param   pSession            The client session.
 * @param   pvData              The input/output data buffer. Can be NULL depending on the function.
 * @param   cbData              The max size of the data buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                         void *pvData, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
         iFunction, pDevExt, pSession, pvData, cbData));

    /*
     * Make sure the returned data size is set to zero.
     */
    if (pcbDataReturned)
        *pcbDataReturned = 0;

    /*
     * Define some helper macros to simplify validation.
     */
    /* Rejects ring-3 callers; some functions are reserved for in-kernel use. */
#define CHECKRET_RING0(mnemonic) \
    do { \
        if (pSession->R0Process != NIL_RTR0PROCESS) \
        { \
            Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
                 pSession->Process, (uintptr_t)pSession->R0Process)); \
            return VERR_PERMISSION_DENIED; \
        } \
    } while (0)
    /* Checks the buffer is at least cbMin bytes and, when a size is required,
       that the pointer looks valid. */
#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
    do { \
        if (cbData < (cbMin)) \
        { \
            Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
                 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
            return VERR_BUFFER_OVERFLOW; \
        } \
        if ((cbMin) != 0 && !VALID_PTR(pvData)) \
        { \
            Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
            return VERR_INVALID_POINTER; \
        } \
    } while (0)


    /*
     * Deal with variably sized requests first.
     * (The size is encoded in the function code, so compare with it stripped.)
     */
    int rc = VINF_SUCCESS;
    if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
    {
        CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
        rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
    }
#ifdef VBOX_WITH_HGCM
    /*
     * These ones are a bit tricky.
     */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        /* User-mode callers get interruptible waits so the process can be killed. */
        bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                           fInterruptible, false /*f32bit*/,
                                           0, cbData, pcbDataReturned);
    }
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
        VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
                                           !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
                                           false /*f32bit*/,
                                           RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
    }
# ifdef RT_ARCH_AMD64
    /* 32-bit processes on a 64-bit kernel use the *_32 variants. */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                           fInterruptible, true /*f32bit*/,
                                           0, cbData, pcbDataReturned);
    }
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
        VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
                                           !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
                                           true /*f32bit*/,
                                           RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
    }
# endif
#endif /* VBOX_WITH_HGCM */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
    {
        CHECKRET_MIN_SIZE("LOG", 1);
        rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
    }
    else
    {
        /*
         * Fixed-size requests.
         */
        switch (iFunction)
        {
            case VBOXGUEST_IOCTL_GETVMMDEVPORT:
                CHECKRET_RING0("GETVMMDEVPORT");
                CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
                rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_WAITEVENT:
                CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
                rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, (VBoxGuestWaitEventInfo *)pvData, pcbDataReturned,
                                                    pSession->R0Process != NIL_RTR0PROCESS);
                break;

            case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
                CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
                rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
                break;

#ifdef VBOX_WITH_HGCM
            case VBOXGUEST_IOCTL_HGCM_CONNECT:
# ifdef RT_ARCH_AMD64
            case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
# endif
                CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
                rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
# ifdef RT_ARCH_AMD64
            case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
# endif
                CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
                rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
                CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
                rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
                break;
#endif /* VBOX_WITH_HGCM */

            default:
            {
                Log(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
                     VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
                rc = VERR_NOT_SUPPORTED;
                break;
            }
        }
    }

    Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
    return rc;
}
1401
1402
1403
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events    = 0;
        ASMCompilerBarrier();
        /* Hand the pre-allocated acknowledge request to the VMMDev; it fills
           in pReq->events and pReq->header.rc synchronously. */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t        fEvents = pReq->events;
            PVBOXGUESTWAIT  pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                /* Wake every waiter whose request the host has marked done. */
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             * Each event bit is consumed by the first waiter that requested it;
             * whatever is left over becomes pending for later WAITEVENT calls.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
1513
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette