VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 21524

Last change on this file since 21524 was 21524, checked in by vboxsync, 15 years ago

common/VBoxGuest: minor log fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.3 KB
Line 
1/* $Id: VBoxGuest.cpp 21524 2009-07-13 09:29:24Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_WITH_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
53/**
54 * Reserves memory in which the VMM can relocate any guest mappings
55 * that are floating around.
56 *
57 * This operation is a little bit tricky since the VMM might not accept
58 * just any address because of address clashes between the three contexts
59 * it operates in, so use a small stack to perform this operation.
60 *
61 * @returns VBox status code (ignored).
62 * @param pDevExt The device extension.
63 */
64static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
65{
66 /*
67 * Query the required space.
68 */
69 VMMDevReqHypervisorInfo *pReq;
70 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
71 if (RT_FAILURE(rc))
72 return rc;
73 pReq->hypervisorStart = 0;
74 pReq->hypervisorSize = 0;
75 rc = VbglGRPerform(&pReq->header);
76 if (RT_FAILURE(rc)) /* this shouldn't happen! */
77 {
78 VbglGRFree(&pReq->header);
79 return rc;
80 }
81
82 /*
83 * The VMM will report back if there is nothing it wants to map, like for
84 * insance in VT-x and AMD-V mode.
85 */
86 if (pReq->hypervisorSize == 0)
87 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
88 else
89 {
90 /*
91 * We have to try several times since the host can be picky
92 * about certain addresses.
93 */
94 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
95 uint32_t cbHypervisor = pReq->hypervisorSize;
96 RTR0MEMOBJ ahTries[5];
97 uint32_t iTry;
98 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
99 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
100 {
101 /*
102 * Reserve space, or if that isn't supported, create a object for
103 * some fictive physical memory and map that in to kernel space.
104 *
105 * To make the code a bit uglier, most systems cannot help with
106 * 4MB alignment, so we have to deal with that in addition to
107 * having two ways of getting the memory.
108 */
109 uint32_t uAlignment = _4M;
110 RTR0MEMOBJ hObj;
111 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
112 if (rc == VERR_NOT_SUPPORTED)
113 {
114 uAlignment = PAGE_SIZE;
115 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
116 }
117 if (rc == VERR_NOT_SUPPORTED)
118 {
119 if (hFictive == NIL_RTR0MEMOBJ)
120 {
121 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M);
122 if (RT_FAILURE(rc))
123 break;
124 hFictive = hObj;
125 }
126 uAlignment = _4M;
127 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
128 if (rc == VERR_NOT_SUPPORTED)
129 {
130 uAlignment = PAGE_SIZE;
131 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
132 }
133 }
134 if (RT_FAILURE(rc))
135 {
136 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
137 rc, cbHypervisor, uAlignment, iTry));
138 break;
139 }
140
141 /*
142 * Try set it.
143 */
144 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
145 pReq->header.rc = VERR_INTERNAL_ERROR;
146 pReq->hypervisorSize = cbHypervisor;
147 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
148 if ( uAlignment == PAGE_SIZE
149 && pReq->hypervisorStart & (_4M - 1))
150 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
151 (pReq->hypervisorStart | (_4M - 1)) + 1;
152 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
153
154 rc = VbglGRPerform(&pReq->header);
155 if (RT_SUCCESS(rc))
156 {
157 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
158 Log(("vboxGuestInitFixateGuestMappings: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
159 RTR0MemObjAddress(pDevExt->hGuestMappings),
160 RTR0MemObjSize(pDevExt->hGuestMappings),
161 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
162 break;
163 }
164 ahTries[iTry] = hObj;
165 }
166
167 /*
168 * Cleanup failed attempts.
169 */
170 while (iTry-- > 0)
171 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
172 if ( RT_FAILURE(rc)
173 && hFictive != NIL_RTR0PTR)
174 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
175 }
176 VbglGRFree(&pReq->header);
177
178 /*
179 * We ignore failed attempts for now.
180 */
181 return VINF_SUCCESS;
182}
183
184
185/**
186 * Undo what vboxGuestInitFixateGuestMappings did.
187 *
188 * @param pDevExt The device extension.
189 */
190static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
191{
192 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
193 {
194 /*
195 * Tell the host that we're going to free the memory we reserved for
196 * it, the free it up. (Leak the memory if anything goes wrong here.)
197 */
198 VMMDevReqHypervisorInfo *pReq;
199 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
200 if (RT_SUCCESS(rc))
201 {
202 pReq->hypervisorStart = 0;
203 pReq->hypervisorSize = 0;
204 rc = VbglGRPerform(&pReq->header);
205 VbglGRFree(&pReq->header);
206 }
207 if (RT_SUCCESS(rc))
208 {
209 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
210 AssertRC(rc);
211 }
212 else
213 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
214
215 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
216 }
217}
218
219
220/**
221 * Sets the interrupt filter mask during initialization and termination.
222 *
223 * This will ASSUME that we're the ones in carge over the mask, so
224 * we'll simply clear all bits we don't set.
225 *
226 * @returns VBox status code (ignored).
227 * @param pDevExt The device extension.
228 * @param fMask The new mask.
229 */
230static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
231{
232 VMMDevCtlGuestFilterMask *pReq;
233 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
234 if (RT_SUCCESS(rc))
235 {
236 pReq->u32OrMask = fMask;
237 pReq->u32NotMask = ~fMask;
238 rc = VbglGRPerform(&pReq->header);
239 if ( RT_FAILURE(rc)
240 || RT_FAILURE(pReq->header.rc))
241 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
242 rc, pReq->header.rc));
243 VbglGRFree(&pReq->header);
244 }
245 return rc;
246}
247
248
249/**
250 * Report guest information to the VMMDev.
251 *
252 * @returns VBox status code.
253 * @param pDevExt The device extension.
254 * @param enmOSType The OS type to report.
255 */
256static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
257{
258 VMMDevReportGuestInfo *pReq;
259 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
260 if (RT_SUCCESS(rc))
261 {
262 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
263 pReq->guestInfo.osType = enmOSType;
264 rc = VbglGRPerform(&pReq->header);
265 if ( RT_FAILURE(rc)
266 || RT_FAILURE(pReq->header.rc))
267 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
268 rc, pReq->header.rc));
269 VbglGRFree(&pReq->header);
270 }
271 return rc;
272}
273
274
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents: HGCM events must always be delivered when HGCM
     * support is compiled in.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->u32MousePosChangedSeq = 0;

    /*
     * If there is an MMIO region validate the version and size.
     * On mismatch we log and continue without the MMIO mapping.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        /* Only the event spinlock can exist at this point; destroy it if it does. */
        Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Preallocate the IRQ acknowledgment request; the ISR needs its physical address. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }
                }
            }

            /* failure cleanup */
        }
        else
            Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Common failure path: tear down the spinlocks created above. */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
411
412
413/**
414 * Deletes all the items in a wait chain.
415 * @param pWait The head of the chain.
416 */
417static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
418{
419 while (pList->pHead)
420 {
421 int rc2;
422 PVBOXGUESTWAIT pWait = pList->pHead;
423 pList->pHead = pWait->pNext;
424
425 pWait->pNext = NULL;
426 pWait->pPrev = NULL;
427 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
428 pWait->Event = NIL_RTSEMEVENTMULTI;
429 pWait->pSession = NULL;
430 RTMemFree(pWait);
431 }
432 pList->pHead = NULL;
433 pList->pTail = NULL;
434}
435
436
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    LogRel(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Unfix the guest mappings, filter all events and clear
     * all capabilities.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX);
    vboxGuestSetFilterMask(pDevExt, 0);

    /*
     * Cleanup resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

    /* Free all wait entries (active, HGCM and recycled ones). */
    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    /* Poison the members so stale use is easier to spot. */
    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
478
479
480/**
481 * Creates a VBoxGuest user session.
482 *
483 * The native code calls this when a ring-3 client opens the device.
484 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
485 *
486 * @returns VBox status code.
487 * @param pDevExt The device extension.
488 * @param ppSession Where to store the session on success.
489 */
490int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
491{
492 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
493 if (RT_UNLIKELY(!pSession))
494 {
495 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
496 return VERR_NO_MEMORY;
497 }
498
499 pSession->Process = RTProcSelf();
500 pSession->R0Process = RTR0ProcHandleSelf();
501 pSession->pDevExt = pDevExt;
502
503 *ppSession = pSession;
504 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
505 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
506 return VINF_SUCCESS;
507}
508
509
510/**
511 * Creates a VBoxGuest kernel session.
512 *
513 * The native code calls this when a ring-0 client connects to the device.
514 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
515 *
516 * @returns VBox status code.
517 * @param pDevExt The device extension.
518 * @param ppSession Where to store the session on success.
519 */
520int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
521{
522 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
523 if (RT_UNLIKELY(!pSession))
524 {
525 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
526 return VERR_NO_MEMORY;
527 }
528
529 pSession->Process = NIL_RTPROCESS;
530 pSession->R0Process = NIL_RTR0PROCESS;
531 pSession->pDevExt = pDevExt;
532
533 *ppSession = pSession;
534 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
535 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
536 return VINF_SUCCESS;
537}
538
539
540
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM services the session is still connected to,
 * poisons the session members and frees the structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i); /* i is only used in the HGCM build */
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect each HGCM client the session left connected; the call
       blocks (indefinitely) via the async wait callback. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0; /* clear the slot before the blocking call */
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Poison the members before freeing to make use-after-free easier to spot. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    RTMemFree(pSession);
}
571
572
573/**
574 * Links the wait-for-event entry into the tail of the given list.
575 *
576 * @param pList The list to link it into.
577 * @param pWait The wait for event entry to append.
578 */
579DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
580{
581 const PVBOXGUESTWAIT pTail = pList->pTail;
582 pWait->pNext = NULL;
583 pWait->pPrev = pTail;
584 if (pTail)
585 pTail->pNext = pWait;
586 else
587 pList->pHead = pWait;
588 pList->pTail = pWait;
589}
590
591
592/**
593 * Unlinks the wait-for-event entry.
594 *
595 * @param pList The list to unlink it from.
596 * @param pWait The wait for event entry to unlink.
597 */
598DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
599{
600 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
601 const PVBOXGUESTWAIT pNext = pWait->pNext;
602 if (pNext)
603 pNext->pPrev = pPrev;
604 else
605 pList->pTail = pPrev;
606 if (pPrev)
607 pPrev->pNext = pNext;
608 else
609 pList->pHead = pNext;
610}
611
612
613/**
614 * Allocates a wiat-for-event entry.
615 *
616 * @returns The wait-for-event entry.
617 * @param pDevExt The device extension.
618 * @param pSession The session that's allocating this. Can be NULL.
619 */
620static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
621{
622 /*
623 * Allocate it one way or the other.
624 */
625 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
626 if (pWait)
627 {
628 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
629 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
630
631 pWait = pDevExt->FreeList.pTail;
632 if (pWait)
633 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
634
635 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
636 }
637 if (!pWait)
638 {
639 static unsigned s_cErrors = 0;
640 int rc;
641
642 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
643 if (!pWait)
644 {
645 if (s_cErrors++ < 32)
646 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
647 return NULL;
648 }
649
650 rc = RTSemEventMultiCreate(&pWait->Event);
651 if (RT_FAILURE(rc))
652 {
653 if (s_cErrors++ < 32)
654 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
655 RTMemFree(pWait);
656 return NULL;
657 }
658 }
659
660 /*
661 * Zero members just as an precaution.
662 */
663 pWait->pNext = NULL;
664 pWait->pPrev = NULL;
665 pWait->fReqEvents = 0;
666 pWait->fResEvents = 0;
667 pWait->pSession = pSession;
668#ifdef VBOX_WITH_HGCM
669 pWait->pHGCMReq = NULL;
670#endif
671 RTSemEventMultiReset(pWait->Event);
672 return pWait;
673}
674
675
676/**
677 * Frees the wait-for-event entry.
678 * The caller must own the wait spinlock!
679 *
680 * @param pDevExt The device extension.
681 * @param pWait The wait-for-event entry to free.
682 */
683static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
684{
685 pWait->fReqEvents = 0;
686 pWait->fResEvents = 0;
687#ifdef VBOX_WITH_HGCM
688 pWait->pHGCMReq = NULL;
689#endif
690 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
691}
692
693
694/**
695 * Frees the wait-for-event entry.
696 *
697 * @param pDevExt The device extension.
698 * @param pWait The wait-for-event entry to free.
699 */
700static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
701{
702 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
703 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
704 VBoxGuestWaitFreeLocked(pDevExt, pWait);
705 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
706}
707
708
709/**
710 * Modifies the guest capabilities.
711 *
712 * Should be called during driver init and termination.
713 *
714 * @returns VBox status code.
715 * @param fOr The Or mask (what to enable).
716 * @param fNot The Not mask (what to disable).
717 */
718int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
719{
720 VMMDevReqGuestCapabilities2 *pReq;
721 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
722 if (RT_FAILURE(rc))
723 {
724 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
725 sizeof(*pReq), sizeof(*pReq), rc));
726 return rc;
727 }
728
729 pReq->u32OrMask = fOr;
730 pReq->u32NotMask = fNot;
731
732 rc = VbglGRPerform(&pReq->header);
733 if (RT_FAILURE(rc))
734 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
735 else if (RT_FAILURE(pReq->header.rc))
736 {
737 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
738 rc = pReq->header.rc;
739 }
740
741 VbglGRFree(&pReq->header);
742 return rc;
743}
744
745
746/**
747 * Implements the fast (no input or output) type of IOCtls.
748 *
749 * This is currently just a placeholder stub inherited from the support driver code.
750 *
751 * @returns VBox status code.
752 * @param iFunction The IOCtl function number.
753 * @param pDevExt The device extension.
754 * @param pSession The session.
755 */
756int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
757{
758 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
759
760 return VERR_NOT_SUPPORTED;
761}
762
763
764
765static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
766{
767 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
768 pInfo->portAddress = pDevExt->IOPortBase;
769 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
770 if (pcbDataReturned)
771 *pcbDataReturned = sizeof(*pInfo);
772 return VINF_SUCCESS;
773}
774
775
/**
 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
 *
 * Checks whether any of the requested events are pending; if so, they are
 * consumed (cleared from f32PendingEvents) and reported through pInfo.
 *
 * The caller enters the spinlock, we may or may not leave it:
 *  - on VINF_SUCCESS the spinlock has been RELEASED before returning;
 *  - on VERR_TIMEOUT the spinlock is STILL HELD by the caller.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 * @returns VERR_TIMEOUT if no requested event is pending (spinlock still held).
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait request; u32EventFlagsOut/u32Result are set on success.
 * @param   iEvent      Index of the first requested event bit (for logging only).
 * @param   fReqEvents  The mask of events being waited for.
 * @param   pTmp        The caller's spinlock state.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events and drop the lock before logging/returning. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
801
802
803static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
804 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
805{
806 pInfo->u32EventFlagsOut = 0;
807 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
808 if (pcbDataReturned)
809 *pcbDataReturned = sizeof(*pInfo);
810
811 /*
812 * Copy and verify the input mask.
813 */
814 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
815 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
816 if (RT_UNLIKELY(iEvent < 0))
817 {
818 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
819 return VERR_INVALID_PARAMETER;
820 }
821
822 /*
823 * Check the condition up front, before doing the wait-for-event allocations.
824 */
825 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
826 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
827 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
828 if (rc == VINF_SUCCESS)
829 return rc;
830 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
831
832 if (!pInfo->u32TimeoutIn)
833 {
834 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
835 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
836 return VERR_TIMEOUT;
837 }
838
839 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
840 if (!pWait)
841 return VERR_NO_MEMORY;
842 pWait->fReqEvents = fReqEvents;
843
844 /*
845 * We've got the wait entry now, re-enter the spinlock and check for the condition.
846 * If the wait condition is met, return.
847 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
848 */
849 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
850 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
851 if (rc == VINF_SUCCESS)
852 {
853 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
854 return rc;
855 }
856 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
857 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
858
859 if (fInterruptible)
860 rc = RTSemEventMultiWaitNoResume(pWait->Event,
861 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
862 else
863 rc = RTSemEventMultiWait(pWait->Event,
864 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
865
866 /*
867 * There is one special case here and that's when the semaphore is
868 * destroyed upon device driver unload. This shouldn't happen of course,
869 * but in case it does, just get out of here ASAP.
870 */
871 if (rc == VERR_SEM_DESTROYED)
872 return rc;
873
874 /*
875 * Unlink the wait item and dispose of it.
876 */
877 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
878 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
879 const uint32_t fResEvents = pWait->fResEvents;
880 VBoxGuestWaitFreeLocked(pDevExt, pWait);
881 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
882
883 /*
884 * Now deal with the return code.
885 */
886 if ( fResEvents
887 && fResEvents != UINT32_MAX)
888 {
889 pInfo->u32EventFlagsOut = fResEvents;
890 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
891 if (fReqEvents & ~((uint32_t)1 << iEvent))
892 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
893 else
894 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
895 rc = VINF_SUCCESS;
896 }
897 else if ( fResEvents == UINT32_MAX
898 || rc == VERR_INTERRUPTED)
899 {
900 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
901 rc == VERR_INTERRUPTED;
902 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
903 }
904 else if (rc == VERR_TIMEOUT)
905 {
906 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
907 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
908 }
909 else
910 {
911 if (RT_SUCCESS(rc))
912 {
913 static unsigned s_cErrors = 0;
914 if (s_cErrors++ < 32)
915 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
916 rc = VERR_INTERNAL_ERROR;
917 }
918 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
919 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
920 }
921
922 return rc;
923}
924
925
926static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
927{
928 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
929 PVBOXGUESTWAIT pWait;
930 int rc = 0;
931
932 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
933
934 /*
935 * Walk the event list and wake up anyone with a matching session.
936 */
937 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
938 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
939 if (pWait->pSession == pSession)
940 {
941 pWait->fResEvents = UINT32_MAX;
942 rc |= RTSemEventMultiSignal(pWait->Event);
943 }
944 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
945 Assert(rc == 0);
946
947 return VINF_SUCCESS;
948}
949
950
/**
 * Handles the VMMREQUEST IOCtl: forwards a raw VMMDev request from the
 * caller to the host and copies the result back.
 *
 * @returns VBox status code (the VMMDev status is propagated on transport success).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The caller's request buffer (header + payload).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Optional; set to the request size on success.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        /* The header claims a larger request than the caller's buffer holds. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    int rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        /* Async execution must not reach this path. */
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the request itself failed: propagate the VMMDev status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1017
1018
1019static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1020{
1021 VMMDevCtlGuestFilterMask *pReq;
1022 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1023 if (RT_FAILURE(rc))
1024 {
1025 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1026 sizeof(*pReq), sizeof(*pReq), rc));
1027 return rc;
1028 }
1029
1030 pReq->u32OrMask = pInfo->u32OrMask;
1031 pReq->u32NotMask = pInfo->u32NotMask;
1032 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1033 rc = VbglGRPerform(&pReq->header);
1034 if (RT_FAILURE(rc))
1035 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1036 else if (RT_FAILURE(pReq->header.rc))
1037 {
1038 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1039 rc = pReq->header.rc;
1040 }
1041
1042 VbglGRFree(&pReq->header);
1043 return rc;
1044}
1045
1046#ifdef VBOX_WITH_HGCM
1047
1048AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1049
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks until the host marks the HGCM request as done
 * (VBOX_HGCM_REQ_DONE in pHdr->fu32Flags), a timeout expires, or (when
 * fInterruptible) the wait is interrupted.
 *
 * @returns VINF_SUCCESS when the request completed, VERR_INTERRUPTED,
 *          VERR_TIMEOUT, VERR_SEM_DESTROYED or other IPRT wait status.
 * @param   pHdr            The HGCM request header being waited on (volatile:
 *                          updated by the host / ISR).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Wait timeout in milliseconds (RT_INDEFINITE_WAIT
 *                          for no timeout).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        /* The DONE flag must be sampled under the event spinlock to avoid
           racing the ISR that sets it. */
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of wait structures: back off briefly and poll again. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* NOTE(review): on VERR_SEM_DESTROYED the wait node is deliberately left
       linked; presumably the device teardown path reclaims it -- confirm. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1122
1123
1124/**
1125 * This is a callback for dealing with async waits.
1126 *
1127 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1128 */
1129static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1130{
1131 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1132 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1133 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1134 pDevExt,
1135 false /* fInterruptible */,
1136 u32User /* cMillies */);
1137}
1138
1139
1140/**
1141 * This is a callback for dealing with async waits with a timeout.
1142 *
1143 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1144 */
1145static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1146 void *pvUser, uint32_t u32User)
1147{
1148 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1149 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1150 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1151 pDevExt,
1152 true /* fInterruptible */,
1153 u32User /* cMillies */ );
1154
1155}
1156
1157
/**
 * Handles the HGCM_CONNECT IOCtl: connects to an HGCM service and records
 * the resulting client id in the session's client id table.
 *
 * @returns VBox status code; VERR_TOO_MANY_OPEN_FILES if the session's
 *          client id table is full (the new connection is rolled back).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The connection request / result structure.
 * @param   pcbDataReturned     Where to store the amount of returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            /* Find a free slot (0 = unused) under the session spinlock. */
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: log (rate limited) and roll the connect back. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1209
1210
1211static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1212 size_t *pcbDataReturned)
1213{
1214 /*
1215 * Validate the client id and invalidate its entry while we're in the call.
1216 */
1217 const uint32_t u32ClientId = pInfo->u32ClientID;
1218 unsigned i;
1219 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1220 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1221 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1222 if (pSession->aHGCMClientIds[i] == u32ClientId)
1223 {
1224 pSession->aHGCMClientIds[i] = UINT32_MAX;
1225 break;
1226 }
1227 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1228 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1229 {
1230 static unsigned s_cErrors = 0;
1231 if (s_cErrors++ > 32)
1232 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1233 return VERR_INVALID_HANDLE;
1234 }
1235
1236 /*
1237 * The VbglHGCMConnect call will invoke the callback if the HGCM
1238 * call is performed in an ASYNC fashion. The function is not able
1239 * to deal with cancelled requests.
1240 */
1241 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1242 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1243 if (RT_SUCCESS(rc))
1244 {
1245 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1246 if (pcbDataReturned)
1247 *pcbDataReturned = sizeof(*pInfo);
1248 }
1249
1250 /* Update the client id array according to the result. */
1251 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1252 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1253 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1254 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1255
1256 return rc;
1257}
1258
1259
1260static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1261 PVBOXGUESTSESSION pSession,
1262 VBoxGuestHGCMCallInfo *pInfo,
1263 uint32_t cMillies, bool fInterruptible, bool f32bit,
1264 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1265{
1266 /*
1267 * Some more validations.
1268 */
1269 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1270 {
1271 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1272 return VERR_INVALID_PARAMETER;
1273 }
1274 size_t cbActual = cbExtra + sizeof(*pInfo);
1275#ifdef RT_ARCH_AMD64
1276 if (f32bit)
1277 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1278 else
1279#endif
1280 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1281 if (cbData < cbActual)
1282 {
1283 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1284 cbData, cbActual));
1285 return VERR_INVALID_PARAMETER;
1286 }
1287
1288 /*
1289 * Validate the client id.
1290 */
1291 const uint32_t u32ClientId = pInfo->u32ClientID;
1292 unsigned i;
1293 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1294 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1295 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1296 if (pSession->aHGCMClientIds[i] == u32ClientId)
1297 break;
1298 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1299 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1300 {
1301 static unsigned s_cErrors = 0;
1302 if (s_cErrors++ > 32)
1303 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1304 return VERR_INVALID_HANDLE;
1305 }
1306
1307 /*
1308 * The VbglHGCMCall call will invoke the callback if the HGCM
1309 * call is performed in an ASYNC fashion. This function can
1310 * deal with cancelled requests, so we let user more requests
1311 * be interruptible (should add a flag for this later I guess).
1312 */
1313 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1314 int rc;
1315 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1316#ifdef RT_ARCH_AMD64
1317 if (f32bit)
1318 {
1319 if (fInterruptible)
1320 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1321 else
1322 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1323 }
1324 else
1325#endif
1326 {
1327 if (fInterruptible)
1328 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1329 else
1330 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1331 }
1332 if (RT_SUCCESS(rc))
1333 {
1334 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1335 if (pcbDataReturned)
1336 *pcbDataReturned = cbActual;
1337 }
1338 else
1339 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1340 return rc;
1341}
1342
1343
1344/**
1345 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1346 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1347 */
1348static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1349{
1350 int rc;
1351 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1352
1353
1354 /*
1355 * If there is an old client, try disconnect it first.
1356 */
1357 if (pDevExt->u32ClipboardClientId != 0)
1358 {
1359 VBoxGuestHGCMDisconnectInfo Info;
1360 Info.result = VERR_WRONG_ORDER;
1361 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1362 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1363 if (RT_SUCCESS(rc))
1364 {
1365 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1366 return rc;
1367 }
1368 if (RT_FAILURE((int32_t)Info.result))
1369 {
1370 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1371 return Info.result;
1372 }
1373 pDevExt->u32ClipboardClientId = 0;
1374 }
1375
1376 /*
1377 * Try connect.
1378 */
1379 VBoxGuestHGCMConnectInfo Info;
1380 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1381 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1382 Info.u32ClientID = 0;
1383 Info.result = VERR_WRONG_ORDER;
1384
1385 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1386 if (RT_FAILURE(rc))
1387 {
1388 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1389 return rc;
1390 }
1391 if (RT_FAILURE(Info.result))
1392 {
1393 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1394 return rc;
1395 }
1396
1397 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1398
1399 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1400 *pu32ClientId = Info.u32ClientID;
1401 if (pcbDataReturned)
1402 *pcbDataReturned = sizeof(uint32_t);
1403
1404 return VINF_SUCCESS;
1405}
1406
1407#endif /* VBOX_WITH_HGCM */
1408
1409/**
1410 * Guest backdoor logging.
1411 *
1412 * @returns VBox status code.
1413 *
1414 * @param pch The log message (need not be NULL terminated).
1415 * @param cbData Size of the buffer.
1416 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1417 */
1418static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1419{
1420 Log(("%.*s", cbData, pch));
1421 if (pcbDataReturned)
1422 *pcbDataReturned = 0;
1423 return VINF_SUCCESS;
1424}
1425
1426
1427/**
1428 * Common IOCtl for user to kernel and kernel to kernel communcation.
1429 *
1430 * This function only does the basic validation and then invokes
1431 * worker functions that takes care of each specific function.
1432 *
1433 * @returns VBox status code.
1434 *
1435 * @param iFunction The requested function.
1436 * @param pDevExt The device extension.
1437 * @param pSession The client session.
1438 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1439 * @param cbData The max size of the data buffer.
1440 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1441 */
1442int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1443 void *pvData, size_t cbData, size_t *pcbDataReturned)
1444{
1445 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1446 iFunction, pDevExt, pSession, pvData, cbData));
1447
1448 /*
1449 * Make sure the returned data size is set to zero.
1450 */
1451 if (pcbDataReturned)
1452 *pcbDataReturned = 0;
1453
1454 /*
1455 * Define some helper macros to simplify validation.
1456 */
1457#define CHECKRET_RING0(mnemonic) \
1458 do { \
1459 if (pSession->R0Process != NIL_RTR0PROCESS) \
1460 { \
1461 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1462 pSession->Process, (uintptr_t)pSession->R0Process)); \
1463 return VERR_PERMISSION_DENIED; \
1464 } \
1465 } while (0)
1466#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1467 do { \
1468 if (cbData < (cbMin)) \
1469 { \
1470 Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1471 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1472 return VERR_BUFFER_OVERFLOW; \
1473 } \
1474 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1475 { \
1476 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1477 return VERR_INVALID_POINTER; \
1478 } \
1479 } while (0)
1480
1481
1482 /*
1483 * Deal with variably sized requests first.
1484 */
1485 int rc = VINF_SUCCESS;
1486 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
1487 {
1488 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1489 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1490 }
1491#ifdef VBOX_WITH_HGCM
1492 /*
1493 * These ones are a bit tricky.
1494 */
1495 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
1496 {
1497 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1498 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1499 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1500 fInterruptible, false /*f32bit*/,
1501 0, cbData, pcbDataReturned);
1502 }
1503 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
1504 {
1505 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1506 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1507 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1508 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1509 false /*f32bit*/,
1510 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1511 }
1512# ifdef RT_ARCH_AMD64
1513 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
1514 {
1515 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1516 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1517 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1518 fInterruptible, true /*f32bit*/,
1519 0, cbData, pcbDataReturned);
1520 }
1521 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
1522 {
1523 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1524 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1525 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1526 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1527 true /*f32bit*/,
1528 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1529 }
1530# endif
1531#endif /* VBOX_WITH_HGCM */
1532 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
1533 {
1534 CHECKRET_MIN_SIZE("LOG", 1);
1535 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
1536 }
1537 else
1538 {
1539 switch (iFunction)
1540 {
1541 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1542 CHECKRET_RING0("GETVMMDEVPORT");
1543 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1544 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1545 break;
1546
1547 case VBOXGUEST_IOCTL_WAITEVENT:
1548 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1549 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
1550 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
1551 break;
1552
1553 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
1554 if (cbData != 0)
1555 rc = VERR_INVALID_PARAMETER;
1556 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1557 break;
1558
1559 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1560 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1561 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1562 break;
1563
1564#ifdef VBOX_WITH_HGCM
1565 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1566# ifdef RT_ARCH_AMD64
1567 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
1568# endif
1569 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1570 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1571 break;
1572
1573 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1574# ifdef RT_ARCH_AMD64
1575 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
1576# endif
1577 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1578 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1579 break;
1580
1581 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1582 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1583 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1584 break;
1585#endif /* VBOX_WITH_HGCM */
1586
1587 default:
1588 {
1589 Log(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
1590 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
1591 rc = VERR_NOT_SUPPORTED;
1592 break;
1593 }
1594 }
1595 }
1596
1597 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1598 return rc;
1599}
1600
1601
1602
1603/**
1604 * Common interrupt service routine.
1605 *
1606 * This deals with events and with waking up thread waiting for those events.
1607 *
1608 * @returns true if it was our interrupt, false if it wasn't.
1609 * @param pDevExt The VBoxGuest device extension.
1610 */
1611bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
1612{
1613 bool fMousePositionChanged = false;
1614 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1615 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
1616 int rc = 0;
1617 bool fOurIrq;
1618
1619 /*
1620 * Make sure we've initalized the device extension.
1621 */
1622 if (RT_UNLIKELY(!pReq))
1623 return false;
1624
1625 /*
1626 * Enter the spinlock and check if it's our IRQ or not.
1627 */
1628 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1629 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
1630 if (fOurIrq)
1631 {
1632 /*
1633 * Acknowlegde events.
1634 * We don't use VbglGRPerform here as it may take another spinlocks.
1635 */
1636 pReq->header.rc = VERR_INTERNAL_ERROR;
1637 pReq->events = 0;
1638 ASMCompilerBarrier();
1639 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
1640 ASMCompilerBarrier(); /* paranoia */
1641 if (RT_SUCCESS(pReq->header.rc))
1642 {
1643 uint32_t fEvents = pReq->events;
1644 PVBOXGUESTWAIT pWait;
1645
1646 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
1647
1648 /*
1649 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
1650 */
1651 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
1652 {
1653 fMousePositionChanged = true;
1654 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1655 }
1656
1657#ifdef VBOX_WITH_HGCM
1658 /*
1659 * The HGCM event/list is kind of different in that we evaluate all entries.
1660 */
1661 if (fEvents & VMMDEV_EVENT_HGCM)
1662 {
1663 for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
1664 if ( !pWait->fResEvents
1665 && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
1666 {
1667 pWait->fResEvents = VMMDEV_EVENT_HGCM;
1668 rc |= RTSemEventMultiSignal(pWait->Event);
1669 }
1670 fEvents &= ~VMMDEV_EVENT_HGCM;
1671 }
1672#endif
1673
1674 /*
1675 * Normal FIFO waiter evaluation.
1676 */
1677 fEvents |= pDevExt->f32PendingEvents;
1678 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1679 if ( (pWait->fReqEvents & fEvents)
1680 && !pWait->fResEvents)
1681 {
1682 pWait->fResEvents = pWait->fReqEvents & fEvents;
1683 fEvents &= ~pWait->fResEvents;
1684 rc |= RTSemEventMultiSignal(pWait->Event);
1685 if (!fEvents)
1686 break;
1687 }
1688 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
1689 }
1690 else /* something is serious wrong... */
1691 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
1692 pReq->header.rc, pReq->events));
1693 }
1694 else
1695 LogFlow(("VBoxGuestCommonISR: not ours\n"));
1696
1697 /*
1698 * Work the poll and async notification queues on OSes that implements that.
1699 * Do this outside the spinlock to prevent some recursive spinlocking.
1700 */
1701 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1702
1703 if (fMousePositionChanged)
1704 {
1705 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
1706 VBoxGuestNativeISRMousePollEvent(pDevExt);
1707 }
1708
1709 Assert(rc == 0);
1710 return fOurIrq;
1711}
1712
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette