VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 14221

Last change on this file since 14221 was 14221, checked in by vboxsync, 16 years ago

Additions/VBoxGuest: Add VBOXGUEST_IOCTL_CALL_TIMEOUT support on non-Windows and Linux guests (disabled until someone can test it)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 51.7 KB
1/** $Id: */
2/** @file
3 * VBoxGuest - Guest Additions Driver.
4 */
5
6/*
7 * Copyright (C) 2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VBoxDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_WITH_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
53/**
54 * Reserves memory in which the VMM can relocate any guest mappings
55 * that are floating around.
56 *
57 * This operation is a little bit tricky since the VMM might not accept
58 * just any address because of address clashes between the three contexts
59 * it operates in, so use a small stack to perform this operation.
60 *
61 * @returns VBox status code (ignored).
62 * @param pDevExt The device extension.
63 */
64static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
65{
66 /** @todo implement this using RTR0MemObjReserveKernel() (it needs to be implemented everywhere too). */
67 return VINF_SUCCESS;
68}
69
70
71/**
72 * Initializes the interrupt filter mask.
73 *
74 * This will ASSUME that we're the ones in charge of the mask, so
75 * we'll simply clear all bits we don't set.
76 *
77 * @returns VBox status code (ignored).
78 * @param pDevExt The device extension.
79 * @param fMask The new mask.
80 */
81static int vboxGuestInitFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
82{
83 VMMDevCtlGuestFilterMask *pReq;
84 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
85 if (RT_SUCCESS(rc))
86 {
87 pReq->u32OrMask = fMask;
88 pReq->u32NotMask = ~fMask; /* It's an AND mask. */
89 rc = VbglGRPerform(&pReq->header);
90 if ( RT_FAILURE(rc)
91 || RT_FAILURE(pReq->header.rc))
92 LogRel(("vboxGuestInitCtlFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
93 rc, pReq->header.rc));
94 VbglGRFree(&pReq->header);
95 }
96 return rc;
97}
98
99
100/**
101 * Report guest information to the VMMDev.
102 *
103 * @returns VBox status code.
104 * @param pDevExt The device extension.
105 * @param enmOSType The OS type to report.
106 */
107static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
108{
109 VMMDevReportGuestInfo *pReq;
110 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
111 if (RT_SUCCESS(rc))
112 {
113 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
114 pReq->guestInfo.osType = enmOSType;
115 rc = VbglGRPerform(&pReq->header);
116 if ( RT_FAILURE(rc)
117 || RT_FAILURE(pReq->header.rc))
118 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
119 rc, pReq->header.rc));
120 VbglGRFree(&pReq->header);
121 }
122 return rc;
123}
124
125
126/**
127 * Initializes the VBoxGuest device extension when the
128 * device driver is loaded.
129 *
130 * The native code locates the VMMDev on the PCI bus and retrieves
131 * the MMIO and I/O port ranges; this function will take care of
132 * mapping the MMIO memory (if present). Upon successful return
133 * the native code should set up the interrupt handler.
134 *
135 * @returns VBox status code.
136 *
137 * @param pDevExt The device extension. Allocated by the native code.
138 * @param IOPortBase The base of the I/O port range.
139 * @param pvMMIOBase The base of the MMIO memory mapping.
140 * This is optional, pass NULL if not present.
141 * @param cbMMIO The size of the MMIO memory mapping.
142 * This is optional, pass 0 if not present.
143 * @param enmOSType The guest OS type to report to the VMMDev.
144 */
145int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
146 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType)
147{
148 int rc, rc2;
149
150 /*
151 * Initialize the data.
152 */
153 pDevExt->IOPortBase = IOPortBase;
154 pDevExt->pVMMDevMemory = NULL;
155 pDevExt->pIrqAckEvents = NULL;
156 pDevExt->WaitList.pHead = NULL;
157 pDevExt->WaitList.pTail = NULL;
158#ifdef VBOX_WITH_HGCM
159 pDevExt->HGCMWaitList.pHead = NULL;
160 pDevExt->HGCMWaitList.pTail = NULL;
161#endif
162 pDevExt->FreeList.pHead = NULL;
163 pDevExt->FreeList.pTail = NULL;
164 pDevExt->f32PendingEvents = 0;
165 pDevExt->u32ClipboardClientId = 0;
166
167 /*
168 * If there is an MMIO region validate the version and size.
169 */
170 if (pvMMIOBase)
171 {
172 Assert(cbMMIO);
173 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
174 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
175 && pVMMDev->u32Size >= 32
176 && pVMMDev->u32Size <= cbMMIO)
177 {
178 pDevExt->pVMMDevMemory = pVMMDev;
179 Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
180 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
181 }
182 else /* try live without it. */
183 LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
184 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
185 }
186
187 /*
188 * Create the wait and session spinlocks.
189 */
190 rc = RTSpinlockCreate(&pDevExt->WaitSpinlock);
191 if (RT_SUCCESS(rc))
192 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
193 if (RT_FAILURE(rc))
194 {
195 Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
196 if (pDevExt->WaitSpinlock != NIL_RTSPINLOCK)
197 RTSpinlockDestroy(pDevExt->WaitSpinlock);
198 return rc;
199 }
200
201 /*
202 * Initialize the guest library and report the guest info back to VMMDev,
203 * set the interrupt control filter mask, and fixate the guest mappings
204 * made by the VMM.
205 */
206 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
207 if (RT_SUCCESS(rc))
208 {
209 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
210 if (RT_SUCCESS(rc))
211 {
212 rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
213 if (RT_SUCCESS(rc))
214 {
215#ifdef VBOX_WITH_HGCM
216 rc = vboxGuestInitFilterMask(pDevExt, VMMDEV_EVENT_HGCM);
217#else
218 rc = vboxGuestInitFilterMask(pDevExt, 0);
219#endif
220 if (RT_SUCCESS(rc))
221 {
222 /*
223 * Disable guest graphics capability by default. The guest specific
224 * graphics driver will re-enable this when it is necessary.
225 */
226 rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
227 if (RT_SUCCESS(rc))
228 {
229 vboxGuestInitFixateGuestMappings(pDevExt);
230 Log(("VBoxGuestInitDevExt: returns success\n"));
231 return VINF_SUCCESS;
232 }
233 }
234 }
235
236 /* failure cleanup */
237 }
238 else
239 Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));
240
241 VbglTerminate();
242 }
243 else
244 Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
245
246 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
247 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
248 return rc; /* (failed) */
249}
250
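/*
 * Illustrative sketch only: roughly how a native, OS specific glue layer is
 * expected to drive the init/teardown pair above. The PCI probing, the IRQ
 * hookup helper (nativeExampleInstallIrqHandler) and the VBOXOSTYPE value are
 * assumptions supplied by the platform code, not part of this file.
 */
#if 0
static int nativeExampleAttach(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                               void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType)
{
    /* 1. The native code has already found the VMMDev PCI function and mapped
          its resources; hand them to the common code. */
    int rc = VBoxGuestInitDevExt(pDevExt, IOPortBase, pvMMIOBase, cbMMIO, enmOSType);
    if (RT_SUCCESS(rc))
    {
        /* 2. Only now install the interrupt handler; it forwards to VBoxGuestCommonISR(). */
        rc = nativeExampleInstallIrqHandler(pDevExt);   /* hypothetical platform helper */
        if (RT_SUCCESS(rc))
            return VINF_SUCCESS;
        VBoxGuestDeleteDevExt(pDevExt);                 /* undo the common init on failure */
    }
    return rc;
}
#endif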
251
252/**
253 * Deletes all the items in a wait chain.
254 * @param pWait The head of the chain.
255 */
256static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
257{
258 while (pList->pHead)
259 {
260 PVBOXGUESTWAIT pWait = pList->pHead;
261 pList->pHead = pWait->pNext;
262
263 pWait->pNext = NULL;
264 pWait->pPrev = NULL;
265 int rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
266 pWait->Event = NIL_RTSEMEVENTMULTI;
267 RTMemFree(pWait);
268 }
269 pList->pHead = NULL;
270 pList->pTail = NULL;
271}
272
273
274/**
275 * Destroys the VBoxGuest device extension.
276 *
277 * The native code should call this before the driver is unloaded,
278 * but don't call this on shutdown.
279 *
280 * @param pDevExt The device extension.
281 */
282void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
283{
284 int rc2;
285 Log(("VBoxGuestDeleteDevExt:\n"));
286
287/** @todo tell VMMDev that the guest additions are no longer running (clear all capability masks).
288 * Like calling VBoxGuestSetGuestCapabilities. This wasn't done initially since it was not
289 * relevant for OS/2. On solaris modules can be unloaded, so we should implement it.
290 */
291
292 rc2 = RTSpinlockDestroy(pDevExt->WaitSpinlock); AssertRC(rc2);
293 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
294
295 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
296#ifdef VBOX_WITH_HGCM
297 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
298#endif
299 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
300
301 VbglTerminate();
302
303 pDevExt->pVMMDevMemory = NULL;
304
305 pDevExt->IOPortBase = 0;
306 pDevExt->pIrqAckEvents = NULL;
307}
308
309
310/**
311 * Creates a VBoxGuest user session.
312 *
313 * The native code calls this when a ring-3 client opens the device.
314 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
315 *
316 * @returns VBox status code.
317 * @param pDevExt The device extension.
318 * @param ppSession Where to store the session on success.
319 */
320int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
321{
322 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
323 if (RT_UNLIKELY(!pSession))
324 {
325 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
326 return VERR_NO_MEMORY;
327 }
328
329 pSession->Process = RTProcSelf();
330 pSession->R0Process = RTR0ProcHandleSelf();
331 pSession->pDevExt = pDevExt;
332
333 *ppSession = pSession;
334 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
335 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
336 return VINF_SUCCESS;
337}
338
339
340/**
341 * Creates a VBoxGuest kernel session.
342 *
343 * The native code calls this when a ring-0 client connects to the device.
344 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
345 *
346 * @returns VBox status code.
347 * @param pDevExt The device extension.
348 * @param ppSession Where to store the session on success.
349 */
350int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
351{
352 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
353 if (RT_UNLIKELY(!pSession))
354 {
355 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
356 return VERR_NO_MEMORY;
357 }
358
359 pSession->Process = NIL_RTPROCESS;
360 pSession->R0Process = NIL_RTR0PROCESS;
361 pSession->pDevExt = pDevExt;
362
363 *ppSession = pSession;
364 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
365 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
366 return VINF_SUCCESS;
367}
368
369
370
371/**
372 * Closes a VBoxGuest session.
373 *
374 * @param pDevExt The device extension.
375 * @param pSession The session to close (and free).
376 */
377void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
378{
379 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
380 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
381
382#ifdef VBOX_WITH_HGCM
383 for (unsigned i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
384 if (pSession->aHGCMClientIds[i])
385 {
386 VBoxGuestHGCMDisconnectInfo Info;
387 Info.result = 0;
388 Info.u32ClientID = pSession->aHGCMClientIds[i];
389 pSession->aHGCMClientIds[i] = 0;
390 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
391 VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
392 }
393#endif
394
395 pSession->pDevExt = NULL;
396 pSession->Process = NIL_RTPROCESS;
397 pSession->R0Process = NIL_RTR0PROCESS;
398 RTMemFree(pSession);
399}
400
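/*
 * Illustrative sketch only: the expected pairing of the session routines above
 * for a ring-0 (in-kernel) client. A ring-3 open would go through
 * VBoxGuestCreateUserSession() from the native open/close entry points instead.
 */
#if 0
static int nativeExampleKernelClient(PVBOXGUESTDEVEXT pDevExt)
{
    PVBOXGUESTSESSION pSession;
    int rc = VBoxGuestCreateKernelSession(pDevExt, &pSession);
    if (RT_SUCCESS(rc))
    {
        /* ... issue VBoxGuestCommonIOCtl() calls against pSession here ... */
        VBoxGuestCloseSession(pDevExt, pSession);   /* also disconnects any HGCM clients */
    }
    return rc;
}
#endif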
401
402/**
403 * Links the wait-for-event entry into the tail of the given list.
404 *
405 * @param pList The list to link it into.
406 * @param pWait The wait for event entry to append.
407 */
408DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
409{
410 const PVBOXGUESTWAIT pTail = pList->pTail;
411 pWait->pNext = NULL;
412 pWait->pPrev = pTail;
413 if (pTail)
414 pTail->pNext = pWait;
415 else
416 pList->pHead = pWait;
417 pList->pTail = pWait;
418}
419
420
421/**
422 * Unlinks the wait-for-event entry.
423 *
424 * @param pList The list to unlink it from.
425 * @param pWait The wait for event entry to unlink.
426 */
427DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
428{
429 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
430 const PVBOXGUESTWAIT pNext = pWait->pNext;
431 if (pNext)
432 pNext->pPrev = pPrev;
433 else
434 pList->pTail = pPrev;
435 if (pPrev)
436 pPrev->pNext = pNext;
437 else
438 pList->pHead = pNext;
439}
440
441
442/**
443 * Allocates a wait-for-event entry.
444 *
445 * @returns The wait-for-event entry.
446 * @param pDevExt The device extension.
447 */
448static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt)
449{
450 /*
451 * Allocate it one way or the other.
452 */
453 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
454 if (pWait)
455 {
456 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
457 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
458
459 pWait = pDevExt->FreeList.pTail;
460 if (pWait)
461 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
462
463 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
464 }
465 if (!pWait)
466 {
467 static unsigned s_cErrors = 0;
468
469 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
470 if (!pWait)
471 {
472 if (s_cErrors++ < 32)
473 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
474 return NULL;
475 }
476
477 int rc = RTSemEventMultiCreate(&pWait->Event);
478 if (RT_FAILURE(rc))
479 {
480 if (s_cErrors++ < 32)
481 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
482 RTMemFree(pWait);
483 return NULL;
484 }
485 }
486
487 /*
488 * Zero members just as a precaution.
489 */
490 pWait->pNext = NULL;
491 pWait->pPrev = NULL;
492 pWait->fReqEvents = 0;
493 pWait->fResEvents = 0;
494#ifdef VBOX_WITH_HGCM
495 pWait->pHGCMReq = NULL;
496#endif
497 RTSemEventMultiReset(pWait->Event);
498 return pWait;
499}
500
501
502/**
503 * Frees the wait-for-event entry.
504 * The caller must own the wait spinlock!
505 *
506 * @param pDevExt The device extension.
507 * @param pWait The wait-for-event entry to free.
508 */
509static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
510{
511 pWait->fReqEvents = 0;
512 pWait->fResEvents = 0;
513#ifdef VBOX_WITH_HGCM
514 pWait->pHGCMReq = NULL;
515#endif
516 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
517}
518
519
520/**
521 * Frees the wait-for-event entry.
522 *
523 * @param pDevExt The device extension.
524 * @param pWait The wait-for-event entry to free.
525 */
526static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
527{
528 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
529 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
530 VBoxGuestWaitFreeLocked(pDevExt, pWait);
531 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
532}
533
534
535/**
536 * Modifies the guest capabilities.
537 *
538 * Should be called during driver init and termination.
539 *
540 * @returns VBox status code.
541 * @param fOr The Or mask (what to enable).
542 * @param fNot The Not mask (what to disable).
543 */
544int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
545{
546 VMMDevReqGuestCapabilities2 *pReq;
547 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
548 if (RT_FAILURE(rc))
549 {
550 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
551 sizeof(*pReq), sizeof(*pReq), rc));
552 return rc;
553 }
554
555 pReq->u32OrMask = fOr;
556 pReq->u32NotMask = fNot;
557
558 rc = VbglGRPerform(&pReq->header);
559 if (RT_FAILURE(rc))
560 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
561 else if (RT_FAILURE(pReq->header.rc))
562 {
563 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
564 rc = pReq->header.rc;
565 }
566
567 VbglGRFree(&pReq->header);
568 return rc;
569}
570
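/*
 * Illustrative sketch only: how a guest graphics driver might re-enable the
 * graphics capability that VBoxGuestInitDevExt() cleared by default, and drop
 * it again on unload. The wrapper names are made up for the example.
 */
#if 0
static int exampleGraphicsDriverLoad(void)
{
    /* Enable: set the bit via the OR mask, clear nothing. */
    return VBoxGuestSetGuestCapabilities(VMMDEV_GUEST_SUPPORTS_GRAPHICS, 0);
}

static int exampleGraphicsDriverUnload(void)
{
    /* Disable again: set nothing, clear the bit via the NOT mask. */
    return VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
}
#endif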
571
572/**
573 * Implements the fast (no input or output) type of IOCtls.
574 *
575 * This is currently just a placeholder stub inherited from the support driver code.
576 *
577 * @returns VBox status code.
578 * @param iFunction The IOCtl function number.
579 * @param pDevExt The device extension.
580 * @param pSession The session.
581 */
582int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
583{
584 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
585
586 return VERR_NOT_SUPPORTED;
587}
588
589
590
591static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
592{
593 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
594 pInfo->portAddress = pDevExt->IOPortBase;
595 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
596 if (pcbDataReturned)
597 *pcbDataReturned = sizeof(*pInfo);
598 return VINF_SUCCESS;
599}
600
601
602/**
603 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
604 * The caller enters the spinlock, we may or may not leave it.
605 *
606 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
607 */
608DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
609 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
610{
611 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
612 if (fMatches)
613 {
614 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
615 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, pTmp);
616
617 pInfo->u32EventFlagsOut = fMatches;
618 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
619 if (fReqEvents & ~((uint32_t)1 << iEvent))
620 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
621 else
622 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
623 return VINF_SUCCESS;
624 }
625 return VERR_TIMEOUT;
626}
627
628
629static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned,
630 bool fInterruptible)
631{
632 pInfo->u32EventFlagsOut = 0;
633 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
634 if (pcbDataReturned)
635 *pcbDataReturned = sizeof(*pInfo);
636
637 /*
638 * Copy and verify the input mask.
639 */
640 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
641 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
642 if (RT_UNLIKELY(iEvent < 0))
643 {
644 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
645 return VERR_INVALID_PARAMETER;
646 }
647
648 /*
649 * Check the condition up front, before doing the wait-for-event allocations.
650 */
651 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
652 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
653 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
654 if (rc == VINF_SUCCESS)
655 return rc;
656 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
657
658 if (!pInfo->u32TimeoutIn)
659 {
660 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
661 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
662 return VERR_TIMEOUT;
663 }
664
665 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt);
666 if (!pWait)
667 return VERR_NO_MEMORY;
668 pWait->fReqEvents = fReqEvents;
669
670 /*
671 * We've got the wait entry now, re-enter the spinlock and check for the condition.
672 * If the wait condition is met, return.
673 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
674 */
675 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
676 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
677 if (rc == VINF_SUCCESS)
678 {
679 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
680 return rc;
681 }
682 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
683 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
684
685 if (fInterruptible)
686 rc = RTSemEventMultiWaitNoResume(pWait->Event,
687 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
688 else
689 rc = RTSemEventMultiWait(pWait->Event,
690 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
691
692 /*
693 * There is one special case here and that's when the semaphore is
694 * destroyed upon device driver unload. This shouldn't happen of course,
695 * but in case it does, just get out of here ASAP.
696 */
697 if (rc == VERR_SEM_DESTROYED)
698 return rc;
699
700 /*
701 * Unlink the wait item and dispose of it.
702 */
703 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
704 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
705 const uint32_t fResEvents = pWait->fResEvents;
706 VBoxGuestWaitFreeLocked(pDevExt, pWait);
707 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
708
709 /*
710 * Now deal with the return code.
711 */
712 if (fResEvents)
713 {
714 pInfo->u32EventFlagsOut = fResEvents;
715 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
716 if (fReqEvents & ~((uint32_t)1 << iEvent))
717 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
718 else
719 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
720 rc = VINF_SUCCESS;
721 }
722 else if (rc == VERR_TIMEOUT)
723 {
724 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
725 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
726 }
727 else if (rc == VERR_INTERRUPTED)
728 {
729 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
730 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
731 }
732 else
733 {
734 if (RT_SUCCESS(rc))
735 {
736 static unsigned s_cErrors = 0;
737 if (s_cErrors++ < 32)
738 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
739 rc = VERR_INTERNAL_ERROR;
740 }
741 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
742 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
743 }
744
745 return rc;
746}
747
748
749static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, VMMDevRequestHeader *pReqHdr,
750 size_t cbData, size_t *pcbDataReturned)
751{
752 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
753
754 /*
755 * Validate the header and request size.
756 */
757 const uint32_t cbReq = pReqHdr->size;
758 const uint32_t cbMinSize = vmmdevGetRequestSize(pReqHdr->requestType);
759 if (cbReq < cbMinSize)
760 {
761 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
762 cbReq, cbMinSize, pReqHdr->requestType));
763 return VERR_INVALID_PARAMETER;
764 }
765 if (cbReq > cbData)
766 {
767 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
768 cbData, cbReq, pReqHdr->requestType));
769 return VERR_INVALID_PARAMETER;
770 }
771
772 /*
773 * Make a copy of the request in the physical memory heap so
774 * the VBoxGuestLibrary can more easily deal with the request.
775 * (This is really a waste of time since the OS or the OS specific
776 * code has already buffered or locked the input/output buffer, but
777 * it does make things a bit simpler wrt the physical address.)
778 */
779 VMMDevRequestHeader *pReqCopy;
780 int rc = VbglGRAlloc(&pReqCopy, cbReq, pReqHdr->requestType);
781 if (RT_FAILURE(rc))
782 {
783 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
784 cbReq, cbReq, rc));
785 return rc;
786 }
787
788 memcpy(pReqCopy, pReqHdr, cbReq);
789 rc = VbglGRPerform(pReqCopy);
790 if ( RT_SUCCESS(rc)
791 && RT_SUCCESS(pReqCopy->rc))
792 {
793 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
794 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
795
796 memcpy(pReqHdr, pReqCopy, cbReq);
797 if (pcbDataReturned)
798 *pcbDataReturned = cbReq;
799 }
800 else if (RT_FAILURE(rc))
801 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
802 else
803 {
804 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
805 rc = pReqCopy->rc;
806 }
807
808 VbglGRFree(pReqCopy);
809 return rc;
810}
811
812
813static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
814{
815 VMMDevCtlGuestFilterMask *pReq;
816 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
817 if (RT_FAILURE(rc))
818 {
819 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
820 sizeof(*pReq), sizeof(*pReq), rc));
821 return rc;
822 }
823
824 pReq->u32OrMask = pInfo->u32OrMask;
825 pReq->u32NotMask = pInfo->u32NotMask;
826
827 rc = VbglGRPerform(&pReq->header);
828 if (RT_FAILURE(rc))
829 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
830 else if (RT_FAILURE(pReq->header.rc))
831 {
832 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
833 rc = pReq->header.rc;
834 }
835
836 VbglGRFree(&pReq->header);
837 return rc;
838}
839
840
841#ifdef VBOX_WITH_HGCM
842
843/**
844 * This is a callback for dealing with async waits.
845 *
846 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
847 */
848static DECLCALLBACK(void)
849VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User)
850{
851 VMMDevHGCMRequestHeader volatile *pHdr = (VMMDevHGCMRequestHeader volatile *)pHdrNonVolatile;
852 const bool fInterruptible = (bool)u32User;
853 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
854 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
855
856 /*
857 * Check to see if the condition was met by the time we got here.
858 *
859 * We create a simple poll loop here for dealing with out-of-memory
860 * conditions since the caller isn't necessarily able to deal with
861 * us returning too early.
862 */
863 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
864 PVBOXGUESTWAIT pWait;
865 for (;;)
866 {
867 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
868 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
869 {
870 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
871 return;
872 }
873 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
874
875 pWait = VBoxGuestWaitAlloc(pDevExt);
876 if (pWait)
877 break;
878 if (fInterruptible)
879 return;
880 RTThreadSleep(1);
881 }
882 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
883 pWait->pHGCMReq = pHdr;
884
885 /*
886 * Re-enter the spinlock and re-check for the condition.
887 * If the condition is met, return.
888 * Otherwise link us into the HGCM wait list and go to sleep.
889 */
890 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
891 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
892 {
893 VBoxGuestWaitFreeLocked(pDevExt, pWait);
894 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
895 return;
896 }
897 VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
898 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
899
900 int rc;
901 if (fInterruptible)
902 rc = RTSemEventMultiWaitNoResume(pWait->Event, RT_INDEFINITE_WAIT);
903 else
904 rc = RTSemEventMultiWait(pWait->Event, RT_INDEFINITE_WAIT);
905
906 /*
907 * This shouldn't ever return failure...
908 * Unlink, free and return.
909 */
910 if (rc == VERR_SEM_DESTROYED)
911 return;
912 if (RT_FAILURE(rc))
913 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
914
915 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
916 VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
917 VBoxGuestWaitFreeLocked(pDevExt, pWait);
918 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
919}
920
921#ifdef HGCM_TIMEOUT
922/**
923 * This is a callback for dealing with async waits with a timeout.
924 *
925 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
926 */
927static DECLCALLBACK(void)
928VBoxGuestHGCMAsyncWaitCallbackTimeout(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser,
929 uint32_t u32User)
930{
931 VMMDevHGCMRequestHeader volatile *pHdr = (VMMDevHGCMRequestHeader volatile *)pHdrNonVolatile;
932 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
933 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
934
935 /*
936 * Check to see if the condition was met by the time we got here.
937 *
938 * We create a simple poll loop here for dealing with out-of-memory
939 * conditions since the caller isn't necessarily able to deal with
940 * us returning too early.
941 */
942 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
943 PVBOXGUESTWAIT pWait;
944 for (;;)
945 {
946 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
947 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
948 {
949 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
950 return;
951 }
952 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
953
954 pWait = VBoxGuestWaitAlloc(pDevExt);
955 if (pWait)
956 break;
957 return;
958 }
959 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
960 pWait->pHGCMReq = pHdr;
961
962 /*
963 * Re-enter the spinlock and re-check for the condition.
964 * If the condition is met, return.
965 * Otherwise link us into the HGCM wait list and go to sleep.
966 */
967 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
968 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
969 {
970 VBoxGuestWaitFreeLocked(pDevExt, pWait);
971 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
972 return;
973 }
974 VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
975 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
976
977 int rc = RTSemEventMultiWaitNoResume(pWait->Event, u32User);
978
979 /*
980 * This shouldn't ever return failure...
981 * Unlink, free and return.
982 */
983 if (rc == VERR_SEM_DESTROYED)
984 return;
985 if (RT_FAILURE(rc))
986 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
987
988 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
989 VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
990 VBoxGuestWaitFreeLocked(pDevExt, pWait);
991 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
992}
993#endif /* HGCM_TIMEOUT */
994
995
996static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
997 size_t *pcbDataReturned)
998{
999 /*
1000 * The VbglHGCMConnect call will invoke the callback if the HGCM
1001 * call is performed in an ASYNC fashion. The function is not able
1002 * to deal with cancelled requests.
1003 */
1004 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
1005 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
1006 ? pInfo->Loc.u.host.achName : "<not local host>"));
1007
1008 int rc = VbglHGCMConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
1009 if (RT_SUCCESS(rc))
1010 {
1011 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
1012 pInfo->u32ClientID, pInfo->result, rc));
1013 if (RT_SUCCESS(pInfo->result))
1014 {
1015 /*
1016 * Append the client id to the client id table.
1017 * If the table has somehow become filled up, we'll disconnect the session.
1018 */
1019 unsigned i;
1020 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1021 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1022 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1023 if (!pSession->aHGCMClientIds[i])
1024 {
1025 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1026 break;
1027 }
1028 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1029 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1030 {
1031 static unsigned s_cErrors = 0;
1032 if (s_cErrors++ < 32)
1033 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1034
1035 VBoxGuestHGCMDisconnectInfo Info;
1036 Info.result = 0;
1037 Info.u32ClientID = pInfo->u32ClientID;
1038 VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
1039 return VERR_TOO_MANY_OPEN_FILES;
1040 }
1041 }
1042 if (pcbDataReturned)
1043 *pcbDataReturned = sizeof(*pInfo);
1044 }
1045 return rc;
1046}
1047
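/*
 * Illustrative sketch only: how a ring-0 client could establish an HGCM
 * connection through the common IOCtl path. "VBoxSharedClipboard" is simply a
 * service name already used in this file; real clients fill in their own.
 */
#if 0
static int exampleHGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                              uint32_t *pu32ClientId)
{
    VBoxGuestHGCMConnectInfo Info;
    size_t cbReturned = 0;

    Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
    strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
    Info.u32ClientID = 0;
    Info.result = (uint32_t)VERR_WRONG_ORDER;

    int rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_HGCM_CONNECT, pDevExt, pSession,
                                  &Info, sizeof(Info), &cbReturned);
    if (RT_SUCCESS(rc) && RT_SUCCESS(Info.result))
        *pu32ClientId = Info.u32ClientID;   /* also remembered in pSession->aHGCMClientIds */
    return rc;
}
#endif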
1048
1049static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1050 size_t *pcbDataReturned)
1051{
1052 /*
1053 * Validate the client id and invalidate its entry while we're in the call.
1054 */
1055 const uint32_t u32ClientId = pInfo->u32ClientID;
1056 unsigned i;
1057 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1058 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1059 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1060 if (pSession->aHGCMClientIds[i] == u32ClientId)
1061 {
1062 pSession->aHGCMClientIds[i] = UINT32_MAX;
1063 break;
1064 }
1065 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1066 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1067 {
1068 static unsigned s_cErrors = 0;
1069 if (s_cErrors++ > 32)
1070 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1071 return VERR_INVALID_HANDLE;
1072 }
1073
1074 /*
1075 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
1076 * call is performed in an ASYNC fashion. The function is not able
1077 * to deal with cancelled requests.
1078 */
1079 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1080 int rc = VbglHGCMDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, false /* uninterruptible */);
1081 if (RT_SUCCESS(rc))
1082 {
1083 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1084 if (pcbDataReturned)
1085 *pcbDataReturned = sizeof(*pInfo);
1086 }
1087
1088 /* Update the client id array according to the result. */
1089 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1090 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1091 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1092 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1093
1094 return rc;
1095}
1096
1097
1098static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfo *pInfo,
1099 size_t cbData, size_t *pcbDataReturned)
1100{
1101 /*
1102 * Some more validations.
1103 */
1104 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1105 {
1106 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1107 return VERR_INVALID_PARAMETER;
1108 }
1109 const size_t cbActual = sizeof(*pInfo) + pInfo->cParms * sizeof(HGCMFunctionParameter);
1110 if (cbData < cbActual)
1111 {
1112 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1113 cbData, cbData, cbActual, cbActual));
1114 return VERR_INVALID_PARAMETER;
1115 }
1116
1117 /*
1118 * Validate the client id.
1119 */
1120 const uint32_t u32ClientId = pInfo->u32ClientID;
1121 unsigned i;
1122 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1123 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1124 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1125 if (pSession->aHGCMClientIds[i] == u32ClientId)
1126 break;
1127 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1128 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1129 {
1130 static unsigned s_cErrors = 0;
1131 if (s_cErrors++ > 32)
1132 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1133 return VERR_INVALID_HANDLE;
1134 }
1135
1136 /*
1137 * The VbglHGCMCall call will invoke the callback if the HGCM
1138 * call is performed in an ASYNC fashion. This function can
1139 * deal with cancelled requests, so we let user mode requests
1140 * be interruptible (should add a flag for this later I guess).
1141 */
1142 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1143 int rc = VbglHGCMCall(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, pSession->R0Process != NIL_RTR0PROCESS);
1144 if (RT_SUCCESS(rc))
1145 {
1146 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1147 if (pcbDataReturned)
1148 *pcbDataReturned = cbActual;
1149 }
1150 else Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1151 return rc;
1152}
1153
1154
1155#ifdef HGCM_TIMEOUT
1156static int VBoxGuestCommonIOCtl_HGCMCallTimeout(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfoTimeout *pInfoTimeout,
1157 size_t cbData, size_t *pcbDataReturned)
1158{
1159 VBoxGuestHGCMCallInfo *pInfo = &pInfoTimeout->info;
1160 /*
1161 * Some more validations.
1162 */
1163 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1164 {
1165 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1166 return VERR_INVALID_PARAMETER;
1167 }
1168 const size_t cbActual = sizeof(*pInfoTimeout) + pInfo->cParms * sizeof(HGCMFunctionParameter);
1169 if (cbData < cbActual)
1170 {
1171 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1172 cbData, cbData, cbActual, cbActual));
1173 return VERR_INVALID_PARAMETER;
1174 }
1175
1176 /*
1177 * Validate the client id.
1178 */
1179 const uint32_t u32ClientId = pInfo->u32ClientID;
1180 unsigned i;
1181 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1182 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1183 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1184 if (pSession->aHGCMClientIds[i] == u32ClientId)
1185 break;
1186 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1187 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1188 {
1189 static unsigned s_cErrors = 0;
1190 if (s_cErrors++ > 32)
1191 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1192 return VERR_INVALID_HANDLE;
1193 }
1194
1195 /*
1196 * The VbglHGCMCall call will invoke the callback if the HGCM
1197 * call is performed in an ASYNC fashion. This function can
1198 * deal with cancelled requests, so we let user mode requests
1199 * be interruptible (should add a flag for this later I guess).
1200 */
1201 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1202 int rc = VbglHGCMCall(pInfo, VBoxGuestHGCMAsyncWaitCallbackTimeout, pDevExt, pInfoTimeout->u32Timeout);
1203 if (RT_SUCCESS(rc))
1204 {
1205 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1206 if (pcbDataReturned)
1207 *pcbDataReturned = cbActual;
1208 }
1209 else Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1210 return rc;
1211}
1212#endif /* HGCM_TIMEOUT */
1213
1214
1215/**
1216 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1217 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1218 */
1219static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1220{
1221 int rc;
1222 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1223
1224
1225 /*
1226 * If there is an old client, try disconnect it first.
1227 */
1228 if (pDevExt->u32ClipboardClientId != 0)
1229 {
1230 VBoxGuestHGCMDisconnectInfo Info;
1231 Info.result = (uint32_t)VERR_WRONG_ORDER; /** @todo Vitali, why is this member unsigned? */
1232 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1233 rc = VbglHGCMDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, 0);
1234 if (RT_FAILURE(rc))
1235 {
1236 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1237 return rc;
1238 }
1239 if (RT_FAILURE((int32_t)Info.result))
1240 {
1241 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", Info.result));
1242 return Info.result;
1243 }
1244 pDevExt->u32ClipboardClientId = 0;
1245 }
1246
1247 /*
1248 * Try connect.
1249 */
1250 VBoxGuestHGCMConnectInfo Info;
1251 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1252 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1253 Info.u32ClientID = 0;
1254 Info.result = (uint32_t)VERR_WRONG_ORDER;
1255
1256 rc = VbglHGCMConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, 0);
1257 if (RT_FAILURE(rc))
1258 {
1259 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnect -> rc=%Rrc\n", rc));
1260 return rc;
1261 }
1262 if (RT_FAILURE((int32_t)Info.result))
1263 {
1264 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnect -> Info.result=%Rrc\n", Info.result));
1265 return Info.result;
1266 }
1267
1268 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1269
1270 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1271 *pu32ClientId = Info.u32ClientID;
1272 if (pcbDataReturned)
1273 *pcbDataReturned = sizeof(uint32_t);
1274
1275 return VINF_SUCCESS;
1276}
1277
1278#endif /* VBOX_WITH_HGCM */
1279
1280
1281/**
1282 * Guest backdoor logging.
1283 *
1284 * @returns VBox status code.
1285 *
1286 * @param pch The log message (need not be NULL terminated).
1287 * @param cbData Size of the buffer.
1288 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1289 */
1290static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1291{
1292 Log(("%.*s", cbData, pch));
1293 if (pcbDataReturned)
1294 *pcbDataReturned = 0;
1295 return VINF_SUCCESS;
1296}
1297
1298
1299/**
1300 * Common IOCtl for user to kernel and kernel to kernel communication.
1301 *
1302 * This function only does the basic validation and then invokes
1303 * worker functions that take care of each specific function.
1304 *
1305 * @returns VBox status code.
1306 *
1307 * @param iFunction The requested function.
1308 * @param pDevExt The device extension.
1309 * @param pSession The client session.
1310 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1311 * @param cbData The max size of the data buffer.
1312 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1313 */
1314int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1315 void *pvData, size_t cbData, size_t *pcbDataReturned)
1316{
1317 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1318 iFunction, pDevExt, pSession, pvData, cbData));
1319
1320 /*
1321 * Define some helper macros to simplify validation.
1322 */
1323#define CHECKRET_RING0(mnemonic) \
1324 do { \
1325 if (pSession->R0Process != NIL_RTR0PROCESS) \
1326 { \
1327 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1328 pSession->Process, (uintptr_t)pSession->R0Process)); \
1329 return VERR_PERMISSION_DENIED; \
1330 } \
1331 } while (0)
1332#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1333 do { \
1334 if (cbData < (cbMin)) \
1335 { \
1336 Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1337 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1338 return VERR_BUFFER_OVERFLOW; \
1339 } \
1340 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1341 { \
1342 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1343 return VERR_INVALID_POINTER; \
1344 } \
1345 } while (0)
1346
1347
1348 /*
1349 * Deal with variably sized requests first.
1350 */
1351 int rc = VINF_SUCCESS;
1352 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
1353 {
1354 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1355 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1356 }
1357#ifdef VBOX_WITH_HGCM
1358 /*
1359 * These ones are tricky and can be done later.
1360 */
1361 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
1362 {
1363 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1364 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, cbData, pcbDataReturned);
1365 }
1366#ifdef HGCM_TIMEOUT
1367 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMEOUT(0)))
1368 {
1369 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfoTimeout));
1370 rc = VBoxGuestCommonIOCtl_HGCMCallTimeout(pDevExt, pSession, (VBoxGuestHGCMCallInfoTimeout *)pvData, cbData, pcbDataReturned);
1371 }
1372#endif /* HGCM_TIMEOUT */
1373#endif /* VBOX_WITH_HGCM */
1374 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
1375 {
1376 CHECKRET_MIN_SIZE("LOG", 1);
1377 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
1378 }
1379 else
1380 {
1381 switch (iFunction)
1382 {
1383 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1384 CHECKRET_RING0("GETVMMDEVPORT");
1385 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1386 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1387 break;
1388
1389 case VBOXGUEST_IOCTL_WAITEVENT:
1390 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1391 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, (VBoxGuestWaitEventInfo *)pvData, pcbDataReturned,
1392 pSession->R0Process != NIL_RTR0PROCESS);
1393 break;
1394
1395 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1396 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1397 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1398 break;
1399
1400#ifdef VBOX_WITH_HGCM
1401 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1402 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1403 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1404 break;
1405
1406 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1407 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1408 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1409 break;
1410
1411 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1412 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1413 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1414 break;
1415#endif /* VBOX_WITH_HGCM */
1416
1417 default:
1418 {
1419 Log(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
1420 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
1421 rc = VERR_NOT_SUPPORTED;
1422 break;
1423 }
1424 }
1425 }
1426
1427 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1428 return rc;
1429}
1430
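/*
 * Illustrative sketch only: what a native ioctl entry point (or an in-kernel
 * client) typically does with the common dispatcher above - here a blocking
 * VBOXGUEST_IOCTL_WAITEVENT request. The event mask and timeout are assumed
 * to come from the caller; the wrapper name is made up for the example.
 */
#if 0
static int exampleWaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                uint32_t fEventMask, uint32_t cMillies)
{
    VBoxGuestWaitEventInfo Info;
    size_t cbReturned = 0;

    Info.u32TimeoutIn     = cMillies;       /* UINT32_MAX means wait indefinitely */
    Info.u32EventMaskIn   = fEventMask;
    Info.u32EventFlagsOut = 0;
    Info.u32Result        = 0;

    int rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_WAITEVENT, pDevExt, pSession,
                                  &Info, sizeof(Info), &cbReturned);
    if (RT_SUCCESS(rc) && Info.u32Result == VBOXGUEST_WAITEVENT_OK)
        Log(("exampleWaitForEvents: events %#x fired\n", Info.u32EventFlagsOut));
    return rc;
}
#endif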
1431
1432
1433/**
1434 * Common interrupt service routine.
1435 *
1436 * This deals with events and with waking up threads waiting for those events.
1437 *
1438 * @returns true if it was our interrupt, false if it wasn't.
1439 * @param pDevExt The VBoxGuest device extension.
1440 */
1441bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
1442{
1443 /*
1444 * Now we have to find out whether it was our IRQ. Read the event mask
1445 * from our device to see if there are any pending events.
1446 */
1447 bool fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
1448 if (fOurIrq)
1449 {
1450 /* Acknowledge events. */
1451 VMMDevEvents *pReq = pDevExt->pIrqAckEvents;
1452 int rc = VbglGRPerform(&pReq->header);
1453 if ( RT_SUCCESS(rc)
1454 && RT_SUCCESS(pReq->header.rc))
1455 {
1456 uint32_t fEvents = pReq->events;
1457 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
1458
1459 /*
1460 * Enter the spinlock and examine the waiting threads.
1461 */
1462 int rc2 = 0;
1463 PVBOXGUESTWAIT pWait;
1464 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1465 RTSpinlockAcquireNoInts(pDevExt->WaitSpinlock, &Tmp);
1466
1467#ifdef VBOX_WITH_HGCM
1468 /* The HGCM event/list is kind of different in that we evaluate all entries. */
1469 if (fEvents & VMMDEV_EVENT_HGCM)
1470 for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
1471 if ( !pWait->fResEvents
1472 && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
1473 {
1474 pWait->fResEvents = VMMDEV_EVENT_HGCM;
1475 rc2 |= RTSemEventMultiSignal(pWait->Event);
1476 }
1477#endif
1478
1479 /* Normal FIFO evaluation. */
1480 fEvents |= pDevExt->f32PendingEvents;
1481 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1482 if (!pWait->fResEvents)
1483 {
1484 pWait->fResEvents = pWait->fReqEvents & fEvents;
1485 fEvents &= ~pWait->fResEvents;
1486 rc2 |= RTSemEventMultiSignal(pWait->Event);
1487 if (!fEvents)
1488 break;
1489 }
1490
1491 ASMAtomicXchgU32(&pDevExt->f32PendingEvents, fEvents);
1492 RTSpinlockReleaseNoInts(pDevExt->WaitSpinlock, &Tmp);
1493 Assert(rc2 == 0);
1494 }
1495 else /* something is seriously wrong... */
1496 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d, header rc=%d (events=%#x)!!\n",
1497 rc, pReq->header.rc, pReq->events));
1498 }
1499 else
1500 LogFlow(("VBoxGuestCommonISR: not ours\n"));
1501
1502 return fOurIrq;
1503}
1504
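/*
 * Illustrative sketch only: the native interrupt handler is expected to be a
 * thin wrapper around VBoxGuestCommonISR(), claiming the (possibly shared)
 * interrupt only when the VMMDev raised it. The ISR prototype shown here is
 * made up for the example; each OS has its own signature.
 */
#if 0
static bool exampleNativeIsr(void *pvUser)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    /* Returns true if the IRQ was ours; events have then been acknowledged
       and any waiting threads signalled. */
    return VBoxGuestCommonISR(pDevExt);
}
#endif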