VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 26504

Last change on this file since 26504 was 26504, checked in by vboxsync, 15 years ago

VBoxGuest.cpp,vboxvfs_vfsops.c: tabs.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 62.5 KB
Line 
1/* $Id: VBoxGuest.cpp 26504 2010-02-14 09:08:42Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_DEFAULT
27#include "VBoxGuestInternal.h"
28#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
29#include <VBox/log.h>
30#include <iprt/mem.h>
31#include <iprt/time.h>
32#include <iprt/memobj.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/process.h>
36#include <iprt/assert.h>
37#include <iprt/param.h>
38#ifdef VBOX_WITH_HGCM
39# include <iprt/thread.h>
40#endif
41
42
43/*******************************************************************************
44* Internal Functions *
45*******************************************************************************/
46#ifdef VBOX_WITH_HGCM
47static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
48#endif
49
50
51
52/**
53 * Reserves memory in which the VMM can relocate any guest mappings
54 * that are floating around.
55 *
56 * This operation is a little bit tricky since the VMM might not accept
57 * just any address because of address clashes between the three contexts
58 * it operates in, so use a small stack to perform this operation.
59 *
60 * @returns VBox status code (ignored).
61 * @param pDevExt The device extension.
62 */
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we try several times (a small "stack" of attempts,
 * ahTries) before giving up.
 *
 * @returns VBox status code (currently ignored by the caller; this function
 *          itself always returns VINF_SUCCESS, see the bottom).
 * @param   pDevExt     The device extension.
 */
static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize  = 0;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglGRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];     /* failed reservations, freed after the loop */
        uint32_t    iTry;
        bool        fBitched = false;   /* true once a failure has been logged via LogRel */
        Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* No 4MB alignment support: over-reserve by 4MB at page alignment
                   so a 4MB-aligned sub-range is guaranteed to exist. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* Reservation not supported at all: fall back to entering fictive
                   physical memory and mapping that into kernel space instead.
                   The fictive object is created once and reused across retries. */
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (uintptr_t)RTR0MemObjAddress(hObj);
            if (    uAlignment == PAGE_SIZE
                &&  pReq->hypervisorStart & (_4M - 1))
                /* Page-aligned over-reservation: round the reported start up to
                   the 4MB boundary inside the reserved range. */
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* Keep the physical backing object when the fictive path was
                   used, otherwise keep the reservation itself. */
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            /* Host rejected this address; remember the attempt for cleanup and retry. */
            ahTries[iTry] = hObj;
        }

        /*
         * Cleanup failed attempts.
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        if (    RT_FAILURE(rc)
            &&  hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglGRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
185
186
187/**
188 * Undo what vboxGuestInitFixateGuestMappings did.
189 *
190 * @param pDevExt The device extension.
191 */
/**
 * Undo what vboxGuestInitFixateGuestMappings did.
 *
 * Tells the host (via a SetHypervisorInfo request with a zero range) that the
 * reserved area is going away, then frees the memory object.  If telling the
 * host fails, the memory is deliberately leaked rather than freed, since the
 * host may still be using it.
 *
 * @param   pDevExt     The device extension.
 */
static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    if (pDevExt->hGuestMappings != NIL_RTR0PTR)
    {
        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
        VMMDevReqHypervisorInfo *pReq;
        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
        if (RT_SUCCESS(rc))
        {
            /* Zero start/size == "no hypervisor area". */
            pReq->hypervisorStart = 0;
            pReq->hypervisorSize  = 0;
            rc = VbglGRPerform(&pReq->header);
            VbglGRFree(&pReq->header);
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
            AssertRC(rc);
        }
        else
            LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));

        pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    }
}
220
221
222/**
223 * Sets the interrupt filter mask during initialization and termination.
224 *
225 * This will ASSUME that we're the ones in carge over the mask, so
226 * we'll simply clear all bits we don't set.
227 *
228 * @returns VBox status code (ignored).
229 * @param pDevExt The device extension.
230 * @param fMask The new mask.
231 */
232static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
233{
234 VMMDevCtlGuestFilterMask *pReq;
235 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
236 if (RT_SUCCESS(rc))
237 {
238 pReq->u32OrMask = fMask;
239 pReq->u32NotMask = ~fMask;
240 rc = VbglGRPerform(&pReq->header);
241 if ( RT_FAILURE(rc)
242 || RT_FAILURE(pReq->header.rc))
243 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
244 rc, pReq->header.rc));
245 VbglGRFree(&pReq->header);
246 }
247 return rc;
248}
249
250
251/**
252 * Report guest information to the VMMDev.
253 *
254 * @returns VBox status code.
255 * @param pDevExt The device extension.
256 * @param enmOSType The OS type to report.
257 */
258static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
259{
260 VMMDevReportGuestInfo *pReq;
261 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
262 if (RT_SUCCESS(rc))
263 {
264 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
265 pReq->guestInfo.osType = enmOSType;
266 rc = VbglGRPerform(&pReq->header);
267 if ( RT_FAILURE(rc)
268 || RT_FAILURE(pReq->header.rc))
269 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
270 rc, pReq->header.rc));
271 VbglGRFree(&pReq->header);
272 }
273 return rc;
274}
275
276
277/**
278 * Initializes the VBoxGuest device extension when the
279 * device driver is loaded.
280 *
281 * The native code locates the VMMDev on the PCI bus and retrieve
282 * the MMIO and I/O port ranges, this function will take care of
283 * mapping the MMIO memory (if present). Upon successful return
284 * the native code should set up the interrupt handler.
285 *
286 * @returns VBox status code.
287 *
288 * @param pDevExt The device extension. Allocated by the native code.
289 * @param IOPortBase The base of the I/O port range.
290 * @param pvMMIOBase The base of the MMIO memory mapping.
291 * This is optional, pass NULL if not present.
292 * @param cbMMIO The size of the MMIO memory mapping.
293 * This is optional, pass 0 if not present.
294 * @param enmOSType The guest OS type to report to the VMMDev.
295 * @param fFixedEvents Events that will be enabled upon init and no client
296 * will ever be allowed to mask.
297 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    /* HGCM events must always be delivered when HGCM support is compiled in. */
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->u32MousePosChangedSeq = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32      /* minimum plausible structure size */
            &&  pVMMDev->u32Size <= cbMMIO) /* must fit inside the mapped region */
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        /* Only the event spinlock can be live here; the session one failed
           to create (or was never attempted). */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     *
     * Note the strict ordering: each step is only attempted after the previous
     * one succeeded, and the unwind below reverses exactly the steps taken.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledgement request so the ISR never has
           to allocate; cache its physical address for the port write. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        /* Failure here is deliberately ignored (returns VINF_SUCCESS). */
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }
                }
            }

            /* failure cleanup */
            /* NOTE(review): pIrqAckEvents is not explicitly freed here; presumably
               VbglTerminate below tears down the phys heap it came from — confirm. */
        }
        else
            Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
413
414
415/**
416 * Deletes all the items in a wait chain.
417 * @param pWait The head of the chain.
418 */
419static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
420{
421 while (pList->pHead)
422 {
423 int rc2;
424 PVBOXGUESTWAIT pWait = pList->pHead;
425 pList->pHead = pWait->pNext;
426
427 pWait->pNext = NULL;
428 pWait->pPrev = NULL;
429 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
430 pWait->Event = NIL_RTSEMEVENTMULTI;
431 pWait->pSession = NULL;
432 RTMemFree(pWait);
433 }
434 pList->pHead = NULL;
435 pList->pTail = NULL;
436}
437
438
439/**
440 * Destroys the VBoxGuest device extension.
441 *
442 * The native code should call this before the driver is loaded,
443 * but don't call this on shutdown.
444 *
445 * @param pDevExt The device extension.
446 */
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * Teardown order matters: first tell the host we are going away (unfix
 * mappings, clear capabilities and the event filter), then free local
 * resources, and finally terminate the guest library.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    LogRel(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Unfix the guest mappings, filter all events and clear
     * all capabilities.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX);   /* clears all capability bits */
    vboxGuestSetFilterMask(pDevExt, 0);             /* stop all event delivery */

    /*
     * Cleanup resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
480
481
482/**
483 * Creates a VBoxGuest user session.
484 *
485 * The native code calls this when a ring-3 client opens the device.
486 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
487 *
488 * @returns VBox status code.
489 * @param pDevExt The device extension.
490 * @param ppSession Where to store the session on success.
491 */
492int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
493{
494 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
495 if (RT_UNLIKELY(!pSession))
496 {
497 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
498 return VERR_NO_MEMORY;
499 }
500
501 pSession->Process = RTProcSelf();
502 pSession->R0Process = RTR0ProcHandleSelf();
503 pSession->pDevExt = pDevExt;
504
505 *ppSession = pSession;
506 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
507 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
508 return VINF_SUCCESS;
509}
510
511
512/**
513 * Creates a VBoxGuest kernel session.
514 *
515 * The native code calls this when a ring-0 client connects to the device.
516 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
517 *
518 * @returns VBox status code.
519 * @param pDevExt The device extension.
520 * @param ppSession Where to store the session on success.
521 */
522int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
523{
524 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
525 if (RT_UNLIKELY(!pSession))
526 {
527 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
528 return VERR_NO_MEMORY;
529 }
530
531 pSession->Process = NIL_RTPROCESS;
532 pSession->R0Process = NIL_RTR0PROCESS;
533 pSession->pDevExt = pDevExt;
534
535 *ppSession = pSession;
536 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
537 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
538 return VINF_SUCCESS;
539}
540
541
542
543/**
544 * Closes a VBoxGuest session.
545 *
546 * @param pDevExt The device extension.
547 * @param pSession The session to close (and free).
548 */
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM clients the session still has open (when HGCM support
 * is compiled in), then poisons and frees the session structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);   /* NOREF for the non-HGCM build where the loop is compiled out */
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            /* Clear the slot before disconnecting so a concurrent lookup
               won't see a client id that is being torn down. */
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Poison the structure before freeing as a use-after-free tripwire. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    RTMemFree(pSession);
}
573
574
575/**
576 * Links the wait-for-event entry into the tail of the given list.
577 *
578 * @param pList The list to link it into.
579 * @param pWait The wait for event entry to append.
580 */
581DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
582{
583 const PVBOXGUESTWAIT pTail = pList->pTail;
584 pWait->pNext = NULL;
585 pWait->pPrev = pTail;
586 if (pTail)
587 pTail->pNext = pWait;
588 else
589 pList->pHead = pWait;
590 pList->pTail = pWait;
591}
592
593
594/**
595 * Unlinks the wait-for-event entry.
596 *
597 * @param pList The list to unlink it from.
598 * @param pWait The wait for event entry to unlink.
599 */
600DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
601{
602 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
603 const PVBOXGUESTWAIT pNext = pWait->pNext;
604 if (pNext)
605 pNext->pPrev = pPrev;
606 else
607 pList->pTail = pPrev;
608 if (pPrev)
609 pPrev->pNext = pNext;
610 else
611 pList->pHead = pNext;
612}
613
614
615/**
616 * Allocates a wiat-for-event entry.
617 *
618 * @returns The wait-for-event entry.
619 * @param pDevExt The device extension.
620 * @param pSession The session that's allocating this. Can be NULL.
621 */
/**
 * Allocates a wait-for-event entry.
 *
 * First tries to recycle an entry from the device extension's free list
 * (unlocked peek followed by a locked re-check), falling back to a fresh
 * heap allocation with a new event semaphore.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek: only take the spinlock if the free list looks non-empty. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        /* Re-check under the lock; the peek above may have raced. */
        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Rate-limit the error logging to the first 32 occurrences. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Recycled entries may still be signalled; reset before reuse. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
676
677
678/**
679 * Frees the wait-for-event entry.
680 * The caller must own the wait spinlock!
681 *
682 * @param pDevExt The device extension.
683 * @param pWait The wait-for-event entry to free.
684 */
/**
 * Frees the wait-for-event entry by putting it back on the free list.
 * The caller must own the event spinlock!
 *
 * The entry (and its event semaphore) is recycled, not released to the heap.
 *
 * @param   pDevExt     The device extension.
 * @param   pWait       The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
}
694
695
696/**
697 * Frees the wait-for-event entry.
698 *
699 * @param pDevExt The device extension.
700 * @param pWait The wait-for-event entry to free.
701 */
/**
 * Frees the wait-for-event entry, acquiring the event spinlock itself.
 *
 * Convenience wrapper around VBoxGuestWaitFreeLocked for callers that do
 * not already hold the spinlock.
 *
 * @param   pDevExt     The device extension.
 * @param   pWait       The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
}
709
710
711/**
712 * Modifies the guest capabilities.
713 *
714 * Should be called during driver init and termination.
715 *
716 * @returns VBox status code.
717 * @param fOr The Or mask (what to enable).
718 * @param fNot The Not mask (what to disable).
719 */
720int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
721{
722 VMMDevReqGuestCapabilities2 *pReq;
723 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
724 if (RT_FAILURE(rc))
725 {
726 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
727 sizeof(*pReq), sizeof(*pReq), rc));
728 return rc;
729 }
730
731 pReq->u32OrMask = fOr;
732 pReq->u32NotMask = fNot;
733
734 rc = VbglGRPerform(&pReq->header);
735 if (RT_FAILURE(rc))
736 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
737 else if (RT_FAILURE(pReq->header.rc))
738 {
739 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
740 rc = pReq->header.rc;
741 }
742
743 VbglGRFree(&pReq->header);
744 return rc;
745}
746
747
748/**
749 * Implements the fast (no input or output) type of IOCtls.
750 *
751 * This is currently just a placeholder stub inherited from the support driver code.
752 *
753 * @returns VBox status code.
754 * @param iFunction The IOCtl function number.
755 * @param pDevExt The device extension.
756 * @param pSession The session.
757 */
758int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
759{
760 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
761
762 return VERR_NOT_SUPPORTED;
763}
764
765
766
767static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
768{
769 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
770 pInfo->portAddress = pDevExt->IOPortBase;
771 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
772 if (pcbDataReturned)
773 *pcbDataReturned = sizeof(*pInfo);
774 return VINF_SUCCESS;
775}
776
777
778/**
779 * Worker VBoxGuestCommonIOCtl_WaitEvent.
780 * The caller enters the spinlock, we may or may not leave it.
781 *
782 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
783 */
/**
 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
 *
 * The caller enters the event spinlock; we may or may not leave it:
 * on VINF_SUCCESS the spinlock has been RELEASED and pInfo filled in,
 * on VERR_TIMEOUT the spinlock is STILL HELD by the caller.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and the caller can return
 *          immediately; VERR_TIMEOUT if no requested event is pending.
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait-event info (out members filled on success).
 * @param   iEvent      Bit index of the first requested event (for logging).
 * @param   fReqEvents  The requested event mask.
 * @param   pTmp        The caller's spinlock state.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events atomically, then drop the lock on
           behalf of the caller. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
803
804
805static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
806 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
807{
808 pInfo->u32EventFlagsOut = 0;
809 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
810 if (pcbDataReturned)
811 *pcbDataReturned = sizeof(*pInfo);
812
813 /*
814 * Copy and verify the input mask.
815 */
816 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
817 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
818 if (RT_UNLIKELY(iEvent < 0))
819 {
820 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
821 return VERR_INVALID_PARAMETER;
822 }
823
824 /*
825 * Check the condition up front, before doing the wait-for-event allocations.
826 */
827 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
828 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
829 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
830 if (rc == VINF_SUCCESS)
831 return rc;
832 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
833
834 if (!pInfo->u32TimeoutIn)
835 {
836 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
837 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
838 return VERR_TIMEOUT;
839 }
840
841 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
842 if (!pWait)
843 return VERR_NO_MEMORY;
844 pWait->fReqEvents = fReqEvents;
845
846 /*
847 * We've got the wait entry now, re-enter the spinlock and check for the condition.
848 * If the wait condition is met, return.
849 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
850 */
851 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
852 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
853 if (rc == VINF_SUCCESS)
854 {
855 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
856 return rc;
857 }
858 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
859 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
860
861 if (fInterruptible)
862 rc = RTSemEventMultiWaitNoResume(pWait->Event,
863 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
864 else
865 rc = RTSemEventMultiWait(pWait->Event,
866 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
867
868 /*
869 * There is one special case here and that's when the semaphore is
870 * destroyed upon device driver unload. This shouldn't happen of course,
871 * but in case it does, just get out of here ASAP.
872 */
873 if (rc == VERR_SEM_DESTROYED)
874 return rc;
875
876 /*
877 * Unlink the wait item and dispose of it.
878 */
879 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
880 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
881 const uint32_t fResEvents = pWait->fResEvents;
882 VBoxGuestWaitFreeLocked(pDevExt, pWait);
883 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
884
885 /*
886 * Now deal with the return code.
887 */
888 if ( fResEvents
889 && fResEvents != UINT32_MAX)
890 {
891 pInfo->u32EventFlagsOut = fResEvents;
892 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
893 if (fReqEvents & ~((uint32_t)1 << iEvent))
894 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
895 else
896 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
897 rc = VINF_SUCCESS;
898 }
899 else if ( fResEvents == UINT32_MAX
900 || rc == VERR_INTERRUPTED)
901 {
902 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
903 rc == VERR_INTERRUPTED;
904 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
905 }
906 else if (rc == VERR_TIMEOUT)
907 {
908 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
909 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
910 }
911 else
912 {
913 if (RT_SUCCESS(rc))
914 {
915 static unsigned s_cErrors = 0;
916 if (s_cErrors++ < 32)
917 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
918 rc = VERR_INTERNAL_ERROR;
919 }
920 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
921 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
922 }
923
924 return rc;
925}
926
927
928static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
929{
930 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
931#if defined(RT_OS_SOLARIS)
932 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
933#endif
934 PVBOXGUESTWAIT pWait;
935 int rc = 0;
936
937 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
938
939 /*
940 * Walk the event list and wake up anyone with a matching session.
941 *
942 * Note! On Solaris we have to do really ugly stuff here because
943 * RTSemEventMultiSignal cannot be called with interrupts disabled.
944 * The hack is racy, but what we can we do... (Eliminate this
945 * termination hack, perhaps?)
946 */
947#if defined(RT_OS_SOLARIS)
948 RTThreadPreemptDisable(&State);
949 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
950 do
951 {
952 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
953 if ( pWait->pSession == pSession
954 && pWait->fResEvents != UINT32_MAX)
955 {
956 RTSEMEVENTMULTI hEvent;
957 pWait->fResEvents = UINT32_MAX;
958 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
959 /* HACK ALRET! This races wakeup + reuse! */
960 rc |= RTSemEventMultiSignal(hEvent);
961 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
962 break;
963 }
964 } while (pWait);
965 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
966 RTThreadPreemptDisable(&State);
967#else
968 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
969 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
970 if (pWait->pSession == pSession)
971 {
972 pWait->fResEvents = UINT32_MAX;
973 rc |= RTSemEventMultiSignal(pWait->Event);
974 }
975 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
976#endif
977 Assert(rc == 0);
978
979 return VINF_SUCCESS;
980}
981
982
983static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
984 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
985{
986 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
987
988 /*
989 * Validate the header and request size.
990 */
991 const VMMDevRequestType enmType = pReqHdr->requestType;
992 const uint32_t cbReq = pReqHdr->size;
993 const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
994 if (cbReq < cbMinSize)
995 {
996 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
997 cbReq, cbMinSize, enmType));
998 return VERR_INVALID_PARAMETER;
999 }
1000 if (cbReq > cbData)
1001 {
1002 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1003 cbData, cbReq, enmType));
1004 return VERR_INVALID_PARAMETER;
1005 }
1006 int rc = VbglGRVerify(pReqHdr, cbData);
1007 if (RT_FAILURE(rc))
1008 {
1009 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc %d!!\n",
1010 cbData, cbReq, enmType, rc));
1011 return rc;
1012 }
1013
1014 /*
1015 * Make a copy of the request in the physical memory heap so
1016 * the VBoxGuestLibrary can more easily deal with the request.
1017 * (This is really a waste of time since the OS or the OS specific
1018 * code has already buffered or locked the input/output buffer, but
1019 * it does makes things a bit simpler wrt to phys address.)
1020 */
1021 VMMDevRequestHeader *pReqCopy;
1022 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1023 if (RT_FAILURE(rc))
1024 {
1025 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1026 cbReq, cbReq, rc));
1027 return rc;
1028 }
1029 memcpy(pReqCopy, pReqHdr, cbReq);
1030
1031 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1032 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1033
1034 rc = VbglGRPerform(pReqCopy);
1035 if ( RT_SUCCESS(rc)
1036 && RT_SUCCESS(pReqCopy->rc))
1037 {
1038 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1039 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1040
1041 memcpy(pReqHdr, pReqCopy, cbReq);
1042 if (pcbDataReturned)
1043 *pcbDataReturned = cbReq;
1044 }
1045 else if (RT_FAILURE(rc))
1046 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1047 else
1048 {
1049 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1050 rc = pReqCopy->rc;
1051 }
1052
1053 VbglGRFree(pReqCopy);
1054 return rc;
1055}
1056
1057
1058static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1059{
1060 VMMDevCtlGuestFilterMask *pReq;
1061 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1062 if (RT_FAILURE(rc))
1063 {
1064 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1065 sizeof(*pReq), sizeof(*pReq), rc));
1066 return rc;
1067 }
1068
1069 pReq->u32OrMask = pInfo->u32OrMask;
1070 pReq->u32NotMask = pInfo->u32NotMask;
1071 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1072 rc = VbglGRPerform(&pReq->header);
1073 if (RT_FAILURE(rc))
1074 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1075 else if (RT_FAILURE(pReq->header.rc))
1076 {
1077 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1078 rc = pReq->header.rc;
1079 }
1080
1081 VbglGRFree(&pReq->header);
1082 return rc;
1083}
1084
1085#ifdef VBOX_WITH_HGCM
1086
1087AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1088
/**
 * Worker common to VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks the calling thread until the host marks the HGCM request as
 * completed (VBOX_HGCM_REQ_DONE in pHdr->fu32Flags), the wait times out,
 * or - in interruptible mode - the thread is interrupted.
 *
 * @returns VBox status code; VINF_SUCCESS when the request completed.
 * @param   pHdr            The HGCM request header. fu32Flags is updated
 *                          asynchronously by the host, hence volatile.
 * @param   pDevExt         The device extension (spinlock + wait lists).
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Timeout in milliseconds; RT_INDEFINITE_WAIT for none.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        /* Fast path: the host may already have completed the request. */
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of wait nodes: back off briefly and retry rather than failing. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        /* NOTE(review): pWait is neither unlinked nor freed on this path -
           presumably the whole wait list is being torn down; verify. */
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1161
1162
1163/**
1164 * This is a callback for dealing with async waits.
1165 *
1166 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1167 */
1168static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1169{
1170 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1171 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1172 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1173 pDevExt,
1174 false /* fInterruptible */,
1175 u32User /* cMillies */);
1176}
1177
1178
/**
 * This is a callback for dealing with interruptible async waits.
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent, but
 * unlike VBoxGuestHGCMAsyncWaitCallback the wait can be interrupted.
 */
1184static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1185 void *pvUser, uint32_t u32User)
1186{
1187 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1188 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1189 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1190 pDevExt,
1191 true /* fInterruptible */,
1192 u32User /* cMillies */ );
1193
1194}
1195
1196
/**
 * Handles the HGCM_CONNECT IOCtl: connects to an HGCM service on the host
 * and records the resulting client id in the session's client id table.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session (owns the client id table).
 * @param   pInfo               The connect information (input/output).
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect (best effort) and fail. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1248
1249
1250static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1251 size_t *pcbDataReturned)
1252{
1253 /*
1254 * Validate the client id and invalidate its entry while we're in the call.
1255 */
1256 const uint32_t u32ClientId = pInfo->u32ClientID;
1257 unsigned i;
1258 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1259 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1260 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1261 if (pSession->aHGCMClientIds[i] == u32ClientId)
1262 {
1263 pSession->aHGCMClientIds[i] = UINT32_MAX;
1264 break;
1265 }
1266 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1267 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1268 {
1269 static unsigned s_cErrors = 0;
1270 if (s_cErrors++ > 32)
1271 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1272 return VERR_INVALID_HANDLE;
1273 }
1274
1275 /*
1276 * The VbglHGCMConnect call will invoke the callback if the HGCM
1277 * call is performed in an ASYNC fashion. The function is not able
1278 * to deal with cancelled requests.
1279 */
1280 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1281 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1282 if (RT_SUCCESS(rc))
1283 {
1284 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1285 if (pcbDataReturned)
1286 *pcbDataReturned = sizeof(*pInfo);
1287 }
1288
1289 /* Update the client id array according to the result. */
1290 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1291 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1292 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1293 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1294
1295 return rc;
1296}
1297
1298
1299static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1300 PVBOXGUESTSESSION pSession,
1301 VBoxGuestHGCMCallInfo *pInfo,
1302 uint32_t cMillies, bool fInterruptible, bool f32bit,
1303 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1304{
1305 /*
1306 * Some more validations.
1307 */
1308 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1309 {
1310 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1311 return VERR_INVALID_PARAMETER;
1312 }
1313 size_t cbActual = cbExtra + sizeof(*pInfo);
1314#ifdef RT_ARCH_AMD64
1315 if (f32bit)
1316 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1317 else
1318#endif
1319 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1320 if (cbData < cbActual)
1321 {
1322 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1323 cbData, cbActual));
1324 return VERR_INVALID_PARAMETER;
1325 }
1326
1327 /*
1328 * Validate the client id.
1329 */
1330 const uint32_t u32ClientId = pInfo->u32ClientID;
1331 unsigned i;
1332 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1333 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1334 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1335 if (pSession->aHGCMClientIds[i] == u32ClientId)
1336 break;
1337 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1338 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1339 {
1340 static unsigned s_cErrors = 0;
1341 if (s_cErrors++ > 32)
1342 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1343 return VERR_INVALID_HANDLE;
1344 }
1345
1346 /*
1347 * The VbglHGCMCall call will invoke the callback if the HGCM
1348 * call is performed in an ASYNC fashion. This function can
1349 * deal with cancelled requests, so we let user more requests
1350 * be interruptible (should add a flag for this later I guess).
1351 */
1352 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1353 int rc;
1354 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1355#ifdef RT_ARCH_AMD64
1356 if (f32bit)
1357 {
1358 if (fInterruptible)
1359 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1360 else
1361 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1362 }
1363 else
1364#endif
1365 {
1366 if (fInterruptible)
1367 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1368 else
1369 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1370 }
1371 if (RT_SUCCESS(rc))
1372 {
1373 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1374 if (pcbDataReturned)
1375 *pcbDataReturned = cbActual;
1376 }
1377 else
1378 {
1379 if (rc != VERR_INTERRUPTED)
1380 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1381 else
1382 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1383 }
1384 return rc;
1385}
1386
1387
1388/**
1389 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1390 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1391 */
1392static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1393{
1394 int rc;
1395 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1396
1397
1398 /*
1399 * If there is an old client, try disconnect it first.
1400 */
1401 if (pDevExt->u32ClipboardClientId != 0)
1402 {
1403 VBoxGuestHGCMDisconnectInfo Info;
1404 Info.result = VERR_WRONG_ORDER;
1405 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1406 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1407 if (RT_SUCCESS(rc))
1408 {
1409 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1410 return rc;
1411 }
1412 if (RT_FAILURE((int32_t)Info.result))
1413 {
1414 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1415 return Info.result;
1416 }
1417 pDevExt->u32ClipboardClientId = 0;
1418 }
1419
1420 /*
1421 * Try connect.
1422 */
1423 VBoxGuestHGCMConnectInfo Info;
1424 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1425 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1426 Info.u32ClientID = 0;
1427 Info.result = VERR_WRONG_ORDER;
1428
1429 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1430 if (RT_FAILURE(rc))
1431 {
1432 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1433 return rc;
1434 }
1435 if (RT_FAILURE(Info.result))
1436 {
1437 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1438 return rc;
1439 }
1440
1441 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1442
1443 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1444 *pu32ClientId = Info.u32ClientID;
1445 if (pcbDataReturned)
1446 *pcbDataReturned = sizeof(uint32_t);
1447
1448 return VINF_SUCCESS;
1449}
1450
1451#endif /* VBOX_WITH_HGCM */
1452
1453/**
1454 * Guest backdoor logging.
1455 *
1456 * @returns VBox status code.
1457 *
1458 * @param pch The log message (need not be NULL terminated).
1459 * @param cbData Size of the buffer.
1460 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1461 */
1462static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1463{
1464 Log(("%.*s", cbData, pch));
1465 if (pcbDataReturned)
1466 *pcbDataReturned = 0;
1467 return VINF_SUCCESS;
1468}
1469
1470
1471/**
 * Common IOCtl for user to kernel and kernel to kernel communication.
1473 *
1474 * This function only does the basic validation and then invokes
1475 * worker functions that takes care of each specific function.
1476 *
1477 * @returns VBox status code.
1478 *
1479 * @param iFunction The requested function.
1480 * @param pDevExt The device extension.
1481 * @param pSession The client session.
1482 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1483 * @param cbData The max size of the data buffer.
1484 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1485 */
1486int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1487 void *pvData, size_t cbData, size_t *pcbDataReturned)
1488{
1489 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1490 iFunction, pDevExt, pSession, pvData, cbData));
1491
1492 /*
1493 * Make sure the returned data size is set to zero.
1494 */
1495 if (pcbDataReturned)
1496 *pcbDataReturned = 0;
1497
1498 /*
1499 * Define some helper macros to simplify validation.
1500 */
1501#define CHECKRET_RING0(mnemonic) \
1502 do { \
1503 if (pSession->R0Process != NIL_RTR0PROCESS) \
1504 { \
1505 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1506 pSession->Process, (uintptr_t)pSession->R0Process)); \
1507 return VERR_PERMISSION_DENIED; \
1508 } \
1509 } while (0)
1510#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1511 do { \
1512 if (cbData < (cbMin)) \
1513 { \
1514 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1515 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1516 return VERR_BUFFER_OVERFLOW; \
1517 } \
1518 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1519 { \
1520 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1521 return VERR_INVALID_POINTER; \
1522 } \
1523 } while (0)
1524
1525
1526 /*
1527 * Deal with variably sized requests first.
1528 */
1529 int rc = VINF_SUCCESS;
1530 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
1531 {
1532 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1533 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1534 }
1535#ifdef VBOX_WITH_HGCM
1536 /*
1537 * These ones are a bit tricky.
1538 */
1539 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
1540 {
1541 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1542 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1543 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1544 fInterruptible, false /*f32bit*/,
1545 0, cbData, pcbDataReturned);
1546 }
1547 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
1548 {
1549 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1550 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1551 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1552 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1553 false /*f32bit*/,
1554 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1555 }
1556# ifdef RT_ARCH_AMD64
1557 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
1558 {
1559 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1560 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1561 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1562 fInterruptible, true /*f32bit*/,
1563 0, cbData, pcbDataReturned);
1564 }
1565 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
1566 {
1567 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1568 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1569 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1570 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1571 true /*f32bit*/,
1572 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1573 }
1574# endif
1575#endif /* VBOX_WITH_HGCM */
1576 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
1577 {
1578 CHECKRET_MIN_SIZE("LOG", 1);
1579 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
1580 }
1581 else
1582 {
1583 switch (iFunction)
1584 {
1585 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1586 CHECKRET_RING0("GETVMMDEVPORT");
1587 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1588 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1589 break;
1590
1591 case VBOXGUEST_IOCTL_WAITEVENT:
1592 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1593 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
1594 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
1595 break;
1596
1597 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
1598 if (cbData != 0)
1599 rc = VERR_INVALID_PARAMETER;
1600 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1601 break;
1602
1603 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1604 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1605 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1606 break;
1607
1608#ifdef VBOX_WITH_HGCM
1609 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1610# ifdef RT_ARCH_AMD64
1611 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
1612# endif
1613 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1614 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1615 break;
1616
1617 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1618# ifdef RT_ARCH_AMD64
1619 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
1620# endif
1621 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1622 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1623 break;
1624
1625 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1626 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1627 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1628 break;
1629#endif /* VBOX_WITH_HGCM */
1630
1631 default:
1632 {
1633 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
1634 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
1635 rc = VERR_NOT_SUPPORTED;
1636 break;
1637 }
1638 }
1639 }
1640
1641 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1642 return rc;
1643}
1644
1645
1646
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them. This works
     *       fine as long as we never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        /* Tell the host to fill in pReq (identified by its physical address). */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (   !pWait->fResEvents
                        && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (   (pWait->fReqEvents & fEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    /* Each event bit is consumed by the first matching waiter. */
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            /* Remember events nobody was waiting for. */
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
1768
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette