VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 21836

Last change on this file since 21836 was 21632, checked in by vboxsync, 16 years ago

VBoxGuest.cpp: A bit more logging in vboxGuestInitFixateGuestMappings.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.5 KB
Line 
1/* $Id: VBoxGuest.cpp 21632 2009-07-16 08:53:13Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_WITH_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
53/**
54 * Reserves memory in which the VMM can relocate any guest mappings
55 * that are floating around.
56 *
57 * This operation is a little bit tricky since the VMM might not accept
58 * just any address because of address clashes between the three contexts
59 * it operates in, so use a small stack to perform this operation.
60 *
61 * @returns VBox status code (ignored).
62 * @param pDevExt The device extension.
63 */
64static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
65{
66 /*
67 * Query the required space.
68 */
69 VMMDevReqHypervisorInfo *pReq;
70 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
71 if (RT_FAILURE(rc))
72 return rc;
73 pReq->hypervisorStart = 0;
74 pReq->hypervisorSize = 0;
75 rc = VbglGRPerform(&pReq->header);
76 if (RT_FAILURE(rc)) /* this shouldn't happen! */
77 {
78 VbglGRFree(&pReq->header);
79 return rc;
80 }
81
82 /*
83 * The VMM will report back if there is nothing it wants to map, like for
84 * insance in VT-x and AMD-V mode.
85 */
86 if (pReq->hypervisorSize == 0)
87 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
88 else
89 {
90 /*
91 * We have to try several times since the host can be picky
92 * about certain addresses.
93 */
94 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
95 uint32_t cbHypervisor = pReq->hypervisorSize;
96 RTR0MEMOBJ ahTries[5];
97 uint32_t iTry;
98 bool fBitched = false;
99 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
100 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
101 {
102 /*
103 * Reserve space, or if that isn't supported, create a object for
104 * some fictive physical memory and map that in to kernel space.
105 *
106 * To make the code a bit uglier, most systems cannot help with
107 * 4MB alignment, so we have to deal with that in addition to
108 * having two ways of getting the memory.
109 */
110 uint32_t uAlignment = _4M;
111 RTR0MEMOBJ hObj;
112 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
113 if (rc == VERR_NOT_SUPPORTED)
114 {
115 uAlignment = PAGE_SIZE;
116 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
117 }
118 if (rc == VERR_NOT_SUPPORTED)
119 {
120 if (hFictive == NIL_RTR0MEMOBJ)
121 {
122 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M);
123 if (RT_FAILURE(rc))
124 break;
125 hFictive = hObj;
126 }
127 uAlignment = _4M;
128 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
129 if (rc == VERR_NOT_SUPPORTED)
130 {
131 uAlignment = PAGE_SIZE;
132 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
133 }
134 }
135 if (RT_FAILURE(rc))
136 {
137 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
138 rc, cbHypervisor, uAlignment, iTry));
139 fBitched = true;
140 break;
141 }
142
143 /*
144 * Try set it.
145 */
146 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
147 pReq->header.rc = VERR_INTERNAL_ERROR;
148 pReq->hypervisorSize = cbHypervisor;
149 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
150 if ( uAlignment == PAGE_SIZE
151 && pReq->hypervisorStart & (_4M - 1))
152 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
153 (pReq->hypervisorStart | (_4M - 1)) + 1;
154 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
155
156 rc = VbglGRPerform(&pReq->header);
157 if (RT_SUCCESS(rc))
158 {
159 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
160 LogRel(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
161 RTR0MemObjAddress(pDevExt->hGuestMappings),
162 RTR0MemObjSize(pDevExt->hGuestMappings),
163 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
164 break;
165 }
166 ahTries[iTry] = hObj;
167 }
168
169 /*
170 * Cleanup failed attempts.
171 */
172 while (iTry-- > 0)
173 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
174 if ( RT_FAILURE(rc)
175 && hFictive != NIL_RTR0PTR)
176 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
177 if (RT_FAILURE(rc) && !fBitched)
178 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
179 }
180 VbglGRFree(&pReq->header);
181
182 /*
183 * We ignore failed attempts for now.
184 */
185 return VINF_SUCCESS;
186}
187
188
189/**
190 * Undo what vboxGuestInitFixateGuestMappings did.
191 *
192 * @param pDevExt The device extension.
193 */
194static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
195{
196 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
197 {
198 /*
199 * Tell the host that we're going to free the memory we reserved for
200 * it, the free it up. (Leak the memory if anything goes wrong here.)
201 */
202 VMMDevReqHypervisorInfo *pReq;
203 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
204 if (RT_SUCCESS(rc))
205 {
206 pReq->hypervisorStart = 0;
207 pReq->hypervisorSize = 0;
208 rc = VbglGRPerform(&pReq->header);
209 VbglGRFree(&pReq->header);
210 }
211 if (RT_SUCCESS(rc))
212 {
213 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
214 AssertRC(rc);
215 }
216 else
217 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
218
219 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
220 }
221}
222
223
224/**
225 * Sets the interrupt filter mask during initialization and termination.
226 *
227 * This will ASSUME that we're the ones in carge over the mask, so
228 * we'll simply clear all bits we don't set.
229 *
230 * @returns VBox status code (ignored).
231 * @param pDevExt The device extension.
232 * @param fMask The new mask.
233 */
234static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
235{
236 VMMDevCtlGuestFilterMask *pReq;
237 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
238 if (RT_SUCCESS(rc))
239 {
240 pReq->u32OrMask = fMask;
241 pReq->u32NotMask = ~fMask;
242 rc = VbglGRPerform(&pReq->header);
243 if ( RT_FAILURE(rc)
244 || RT_FAILURE(pReq->header.rc))
245 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
246 rc, pReq->header.rc));
247 VbglGRFree(&pReq->header);
248 }
249 return rc;
250}
251
252
253/**
254 * Report guest information to the VMMDev.
255 *
256 * @returns VBox status code.
257 * @param pDevExt The device extension.
258 * @param enmOSType The OS type to report.
259 */
260static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
261{
262 VMMDevReportGuestInfo *pReq;
263 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
264 if (RT_SUCCESS(rc))
265 {
266 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
267 pReq->guestInfo.osType = enmOSType;
268 rc = VbglGRPerform(&pReq->header);
269 if ( RT_FAILURE(rc)
270 || RT_FAILURE(pReq->header.rc))
271 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
272 rc, pReq->header.rc));
273 VbglGRFree(&pReq->header);
274 }
275 return rc;
276}
277
278
279/**
280 * Initializes the VBoxGuest device extension when the
281 * device driver is loaded.
282 *
283 * The native code locates the VMMDev on the PCI bus and retrieve
284 * the MMIO and I/O port ranges, this function will take care of
285 * mapping the MMIO memory (if present). Upon successful return
286 * the native code should set up the interrupt handler.
287 *
288 * @returns VBox status code.
289 *
290 * @param pDevExt The device extension. Allocated by the native code.
291 * @param IOPortBase The base of the I/O port range.
292 * @param pvMMIOBase The base of the MMIO memory mapping.
293 * This is optional, pass NULL if not present.
294 * @param cbMMIO The size of the MMIO memory mapping.
295 * This is optional, pass 0 if not present.
296 * @param enmOSType The guest OS type to report to the VMMDev.
297 * @param fFixedEvents Events that will be enabled upon init and no client
298 * will ever be allowed to mask.
299 */
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents: the HGCM event is always required when HGCM
     * support is compiled in, so force it into the fixed set.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initalize the data.  Everything is set to a well-defined empty/NIL
     * state first so the failure paths below can clean up safely.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->u32MousePosChangedSeq = 0;

    /*
     * If there is an MMIO region validate the version and size.
     * A bogus region is logged but not fatal - we can operate without it.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and seesion spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        /* Only the event spinlock can exist at this point; session one failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
    * made by the VMM.  Each step is undone in reverse order on failure.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the ISR event acknowledgement request and cache its
           physical address for use in the interrupt handler. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        /* Result deliberately ignored - see the function. */
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }
                }
            }

            /* failure cleanup */
        }
        else
            Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
415
416
417/**
418 * Deletes all the items in a wait chain.
419 * @param pWait The head of the chain.
420 */
421static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
422{
423 while (pList->pHead)
424 {
425 int rc2;
426 PVBOXGUESTWAIT pWait = pList->pHead;
427 pList->pHead = pWait->pNext;
428
429 pWait->pNext = NULL;
430 pWait->pPrev = NULL;
431 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
432 pWait->Event = NIL_RTSEMEVENTMULTI;
433 pWait->pSession = NULL;
434 RTMemFree(pWait);
435 }
436 pList->pHead = NULL;
437 pList->pTail = NULL;
438}
439
440
441/**
442 * Destroys the VBoxGuest device extension.
443 *
444 * The native code should call this before the driver is loaded,
445 * but don't call this on shutdown.
446 *
447 * @param pDevExt The device extension.
448 */
449void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
450{
451 int rc2;
452 Log(("VBoxGuestDeleteDevExt:\n"));
453 LogRel(("VBoxGuest: The additions driver is terminating.\n"));
454
455 /*
456 * Unfix the guest mappings, filter all events and clear
457 * all capabilities.
458 */
459 vboxGuestTermUnfixGuestMappings(pDevExt);
460 VBoxGuestSetGuestCapabilities(0, UINT32_MAX);
461 vboxGuestSetFilterMask(pDevExt, 0);
462
463 /*
464 * Cleanup resources.
465 */
466 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
467 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
468
469 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
470#ifdef VBOX_WITH_HGCM
471 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
472#endif
473 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
474
475 VbglTerminate();
476
477 pDevExt->pVMMDevMemory = NULL;
478
479 pDevExt->IOPortBase = 0;
480 pDevExt->pIrqAckEvents = NULL;
481}
482
483
484/**
485 * Creates a VBoxGuest user session.
486 *
487 * The native code calls this when a ring-3 client opens the device.
488 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
489 *
490 * @returns VBox status code.
491 * @param pDevExt The device extension.
492 * @param ppSession Where to store the session on success.
493 */
494int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
495{
496 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
497 if (RT_UNLIKELY(!pSession))
498 {
499 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
500 return VERR_NO_MEMORY;
501 }
502
503 pSession->Process = RTProcSelf();
504 pSession->R0Process = RTR0ProcHandleSelf();
505 pSession->pDevExt = pDevExt;
506
507 *ppSession = pSession;
508 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
509 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
510 return VINF_SUCCESS;
511}
512
513
514/**
515 * Creates a VBoxGuest kernel session.
516 *
517 * The native code calls this when a ring-0 client connects to the device.
518 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
519 *
520 * @returns VBox status code.
521 * @param pDevExt The device extension.
522 * @param ppSession Where to store the session on success.
523 */
524int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
525{
526 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
527 if (RT_UNLIKELY(!pSession))
528 {
529 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
530 return VERR_NO_MEMORY;
531 }
532
533 pSession->Process = NIL_RTPROCESS;
534 pSession->R0Process = NIL_RTR0PROCESS;
535 pSession->pDevExt = pDevExt;
536
537 *ppSession = pSession;
538 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
539 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
540 return VINF_SUCCESS;
541}
542
543
544
545/**
546 * Closes a VBoxGuest session.
547 *
548 * @param pDevExt The device extension.
549 * @param pSession The session to close (and free).
550 */
551void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
552{
553 unsigned i; NOREF(i);
554 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
555 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
556
557#ifdef VBOX_WITH_HGCM
558 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
559 if (pSession->aHGCMClientIds[i])
560 {
561 VBoxGuestHGCMDisconnectInfo Info;
562 Info.result = 0;
563 Info.u32ClientID = pSession->aHGCMClientIds[i];
564 pSession->aHGCMClientIds[i] = 0;
565 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
566 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
567 }
568#endif
569
570 pSession->pDevExt = NULL;
571 pSession->Process = NIL_RTPROCESS;
572 pSession->R0Process = NIL_RTR0PROCESS;
573 RTMemFree(pSession);
574}
575
576
577/**
578 * Links the wait-for-event entry into the tail of the given list.
579 *
580 * @param pList The list to link it into.
581 * @param pWait The wait for event entry to append.
582 */
583DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
584{
585 const PVBOXGUESTWAIT pTail = pList->pTail;
586 pWait->pNext = NULL;
587 pWait->pPrev = pTail;
588 if (pTail)
589 pTail->pNext = pWait;
590 else
591 pList->pHead = pWait;
592 pList->pTail = pWait;
593}
594
595
596/**
597 * Unlinks the wait-for-event entry.
598 *
599 * @param pList The list to unlink it from.
600 * @param pWait The wait for event entry to unlink.
601 */
602DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
603{
604 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
605 const PVBOXGUESTWAIT pNext = pWait->pNext;
606 if (pNext)
607 pNext->pPrev = pPrev;
608 else
609 pList->pTail = pPrev;
610 if (pPrev)
611 pPrev->pNext = pNext;
612 else
613 pList->pHead = pNext;
614}
615
616
617/**
618 * Allocates a wiat-for-event entry.
619 *
620 * @returns The wait-for-event entry.
621 * @param pDevExt The device extension.
622 * @param pSession The session that's allocating this. Can be NULL.
623 */
/**
 * Allocates a wait-for-event entry.
 *
 * Tries to recycle an entry from the device extension's free list first,
 * falling back to a fresh heap allocation (with a new event semaphore).
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;  /* unlocked peek to skip taking the lock when the list looks empty */
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = pDevExt->FreeList.pTail;             /* re-check under the lock; another thread may have raced us */
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Nothing recyclable - allocate fresh. Error logging is rate limited
           to the first 32 occurrences. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as an precaution.
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Recycled entries may still have a signalled semaphore - reset it. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
678
679
680/**
681 * Frees the wait-for-event entry.
682 * The caller must own the wait spinlock!
683 *
684 * @param pDevExt The device extension.
685 * @param pWait The wait-for-event entry to free.
686 */
687static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
688{
689 pWait->fReqEvents = 0;
690 pWait->fResEvents = 0;
691#ifdef VBOX_WITH_HGCM
692 pWait->pHGCMReq = NULL;
693#endif
694 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
695}
696
697
698/**
699 * Frees the wait-for-event entry.
700 *
701 * @param pDevExt The device extension.
702 * @param pWait The wait-for-event entry to free.
703 */
704static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
705{
706 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
707 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
708 VBoxGuestWaitFreeLocked(pDevExt, pWait);
709 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
710}
711
712
713/**
714 * Modifies the guest capabilities.
715 *
716 * Should be called during driver init and termination.
717 *
718 * @returns VBox status code.
719 * @param fOr The Or mask (what to enable).
720 * @param fNot The Not mask (what to disable).
721 */
722int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
723{
724 VMMDevReqGuestCapabilities2 *pReq;
725 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
726 if (RT_FAILURE(rc))
727 {
728 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
729 sizeof(*pReq), sizeof(*pReq), rc));
730 return rc;
731 }
732
733 pReq->u32OrMask = fOr;
734 pReq->u32NotMask = fNot;
735
736 rc = VbglGRPerform(&pReq->header);
737 if (RT_FAILURE(rc))
738 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
739 else if (RT_FAILURE(pReq->header.rc))
740 {
741 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
742 rc = pReq->header.rc;
743 }
744
745 VbglGRFree(&pReq->header);
746 return rc;
747}
748
749
750/**
751 * Implements the fast (no input or output) type of IOCtls.
752 *
753 * This is currently just a placeholder stub inherited from the support driver code.
754 *
755 * @returns VBox status code.
756 * @param iFunction The IOCtl function number.
757 * @param pDevExt The device extension.
758 * @param pSession The session.
759 */
760int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
761{
762 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
763
764 return VERR_NOT_SUPPORTED;
765}
766
767
768
769static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
770{
771 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
772 pInfo->portAddress = pDevExt->IOPortBase;
773 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
774 if (pcbDataReturned)
775 *pcbDataReturned = sizeof(*pInfo);
776 return VINF_SUCCESS;
777}
778
779
780/**
781 * Worker VBoxGuestCommonIOCtl_WaitEvent.
782 * The caller enters the spinlock, we may or may not leave it.
783 *
784 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
785 */
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 * The caller enters the spinlock, we may or may not leave it.
 *
 * If any of the requested events are pending, they are consumed (cleared
 * from f32PendingEvents), the spinlock is RELEASED, and pInfo is filled in.
 * Otherwise the spinlock is left HELD and the caller must release it.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 * @returns VERR_TIMEOUT if no requested event is pending (spinlock still held).
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait-event info; outputs filled in on success.
 * @param   iEvent      Zero-based index of the lowest requested event bit
 *                      (used only to pick the log message format).
 * @param   fReqEvents  The requested event mask.
 * @param   pTmp        The spinlock temp variable the caller acquired with.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events atomically, then drop the lock. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
805
806
807static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
808 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
809{
810 pInfo->u32EventFlagsOut = 0;
811 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
812 if (pcbDataReturned)
813 *pcbDataReturned = sizeof(*pInfo);
814
815 /*
816 * Copy and verify the input mask.
817 */
818 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
819 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
820 if (RT_UNLIKELY(iEvent < 0))
821 {
822 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
823 return VERR_INVALID_PARAMETER;
824 }
825
826 /*
827 * Check the condition up front, before doing the wait-for-event allocations.
828 */
829 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
830 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
831 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
832 if (rc == VINF_SUCCESS)
833 return rc;
834 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
835
836 if (!pInfo->u32TimeoutIn)
837 {
838 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
839 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
840 return VERR_TIMEOUT;
841 }
842
843 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
844 if (!pWait)
845 return VERR_NO_MEMORY;
846 pWait->fReqEvents = fReqEvents;
847
848 /*
849 * We've got the wait entry now, re-enter the spinlock and check for the condition.
850 * If the wait condition is met, return.
851 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
852 */
853 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
854 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
855 if (rc == VINF_SUCCESS)
856 {
857 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
858 return rc;
859 }
860 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
861 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
862
863 if (fInterruptible)
864 rc = RTSemEventMultiWaitNoResume(pWait->Event,
865 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
866 else
867 rc = RTSemEventMultiWait(pWait->Event,
868 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
869
870 /*
871 * There is one special case here and that's when the semaphore is
872 * destroyed upon device driver unload. This shouldn't happen of course,
873 * but in case it does, just get out of here ASAP.
874 */
875 if (rc == VERR_SEM_DESTROYED)
876 return rc;
877
878 /*
879 * Unlink the wait item and dispose of it.
880 */
881 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
882 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
883 const uint32_t fResEvents = pWait->fResEvents;
884 VBoxGuestWaitFreeLocked(pDevExt, pWait);
885 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
886
887 /*
888 * Now deal with the return code.
889 */
890 if ( fResEvents
891 && fResEvents != UINT32_MAX)
892 {
893 pInfo->u32EventFlagsOut = fResEvents;
894 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
895 if (fReqEvents & ~((uint32_t)1 << iEvent))
896 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
897 else
898 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
899 rc = VINF_SUCCESS;
900 }
901 else if ( fResEvents == UINT32_MAX
902 || rc == VERR_INTERRUPTED)
903 {
904 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
905 rc == VERR_INTERRUPTED;
906 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
907 }
908 else if (rc == VERR_TIMEOUT)
909 {
910 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
911 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
912 }
913 else
914 {
915 if (RT_SUCCESS(rc))
916 {
917 static unsigned s_cErrors = 0;
918 if (s_cErrors++ < 32)
919 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
920 rc = VERR_INTERNAL_ERROR;
921 }
922 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
923 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
924 }
925
926 return rc;
927}
928
929
930static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
931{
932 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
933 PVBOXGUESTWAIT pWait;
934 int rc = 0;
935
936 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
937
938 /*
939 * Walk the event list and wake up anyone with a matching session.
940 */
941 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
942 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
943 if (pWait->pSession == pSession)
944 {
945 pWait->fResEvents = UINT32_MAX;
946 rc |= RTSemEventMultiSignal(pWait->Event);
947 }
948 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
949 Assert(rc == 0);
950
951 return VINF_SUCCESS;
952}
953
954
/**
 * Handles the VMMREQUEST IOCtl: forwards a caller-supplied VMMDev request to
 * the host and copies the (possibly modified) request back on success.
 *
 * @returns VBox status code; on host-side failure the VMMDev status from the
 *          request header is returned.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The caller's request buffer (header + payload).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Optional; set to the number of bytes copied back.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size: the declared size must be at
     * least the minimum for this request type, and must fit in the buffer.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    int rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    /* Querying the mouse status acknowledges the poll condition for this session. */
    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        /* Async HGCM execution must not leak through this path. */
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1021
1022
1023static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1024{
1025 VMMDevCtlGuestFilterMask *pReq;
1026 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1027 if (RT_FAILURE(rc))
1028 {
1029 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1030 sizeof(*pReq), sizeof(*pReq), rc));
1031 return rc;
1032 }
1033
1034 pReq->u32OrMask = pInfo->u32OrMask;
1035 pReq->u32NotMask = pInfo->u32NotMask;
1036 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1037 rc = VbglGRPerform(&pReq->header);
1038 if (RT_FAILURE(rc))
1039 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1040 else if (RT_FAILURE(pReq->header.rc))
1041 {
1042 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1043 rc = pReq->header.rc;
1044 }
1045
1046 VbglGRFree(&pReq->header);
1047 return rc;
1048}
1049
1050#ifdef VBOX_WITH_HGCM
1051
1052AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1053
/** Worker for VBoxGuestHGCMAsyncWaitCallback and
 *  VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks the calling thread until the host marks the HGCM request done
 * (VBOX_HGCM_REQ_DONE), the timeout expires, or the wait is interrupted.
 *
 * @returns VINF_SUCCESS when the request is done, VERR_INTERRUPTED if an
 *          interruptible wait was interrupted (also used when a wait node
 *          cannot be allocated), VERR_TIMEOUT, or VERR_SEM_DESTROYED.
 * @param   pHdr            The HGCM request header being waited on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Timeout in milliseconds (RT_INDEFINITE_WAIT ok).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        /* Out of wait nodes: interruptible callers bail out, the rest poll. */
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* NOTE(review): on VERR_SEM_DESTROYED the wait node is deliberately not
       unlinked/freed here — presumably the device teardown owns it then;
       confirm against the destruction path. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1126
1127
1128/**
1129 * This is a callback for dealing with async waits.
1130 *
1131 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1132 */
1133static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1134{
1135 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1136 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1137 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1138 pDevExt,
1139 false /* fInterruptible */,
1140 u32User /* cMillies */);
1141}
1142
1143
1144/**
1145 * This is a callback for dealing with async waits with a timeout.
1146 *
1147 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1148 */
1149static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1150 void *pvUser, uint32_t u32User)
1151{
1152 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1153 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1154 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1155 pDevExt,
1156 true /* fInterruptible */,
1157 u32User /* cMillies */ );
1158
1159}
1160
1161
/**
 * Handles the HGCM_CONNECT IOCtl: connects to an HGCM service on behalf of
 * the session and records the new client id in the session's id table.
 *
 * @returns VBox status code; VERR_TOO_MANY_OPEN_FILES if the session's
 *          client id table is full (the connection is then rolled back).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session (owns the client id).
 * @param   pInfo               The connect request/result structure.
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Optional (can be NULL).
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])   /* 0 == free slot */
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect so the host doesn't keep a
                   client the guest can never reference again. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1213
1214
1215static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1216 size_t *pcbDataReturned)
1217{
1218 /*
1219 * Validate the client id and invalidate its entry while we're in the call.
1220 */
1221 const uint32_t u32ClientId = pInfo->u32ClientID;
1222 unsigned i;
1223 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1224 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1225 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1226 if (pSession->aHGCMClientIds[i] == u32ClientId)
1227 {
1228 pSession->aHGCMClientIds[i] = UINT32_MAX;
1229 break;
1230 }
1231 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1232 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1233 {
1234 static unsigned s_cErrors = 0;
1235 if (s_cErrors++ > 32)
1236 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1237 return VERR_INVALID_HANDLE;
1238 }
1239
1240 /*
1241 * The VbglHGCMConnect call will invoke the callback if the HGCM
1242 * call is performed in an ASYNC fashion. The function is not able
1243 * to deal with cancelled requests.
1244 */
1245 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1246 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1247 if (RT_SUCCESS(rc))
1248 {
1249 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1250 if (pcbDataReturned)
1251 *pcbDataReturned = sizeof(*pInfo);
1252 }
1253
1254 /* Update the client id array according to the result. */
1255 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1256 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1257 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1258 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1259
1260 return rc;
1261}
1262
1263
1264static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1265 PVBOXGUESTSESSION pSession,
1266 VBoxGuestHGCMCallInfo *pInfo,
1267 uint32_t cMillies, bool fInterruptible, bool f32bit,
1268 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1269{
1270 /*
1271 * Some more validations.
1272 */
1273 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1274 {
1275 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1276 return VERR_INVALID_PARAMETER;
1277 }
1278 size_t cbActual = cbExtra + sizeof(*pInfo);
1279#ifdef RT_ARCH_AMD64
1280 if (f32bit)
1281 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1282 else
1283#endif
1284 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1285 if (cbData < cbActual)
1286 {
1287 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1288 cbData, cbActual));
1289 return VERR_INVALID_PARAMETER;
1290 }
1291
1292 /*
1293 * Validate the client id.
1294 */
1295 const uint32_t u32ClientId = pInfo->u32ClientID;
1296 unsigned i;
1297 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1298 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1299 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1300 if (pSession->aHGCMClientIds[i] == u32ClientId)
1301 break;
1302 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1303 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1304 {
1305 static unsigned s_cErrors = 0;
1306 if (s_cErrors++ > 32)
1307 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1308 return VERR_INVALID_HANDLE;
1309 }
1310
1311 /*
1312 * The VbglHGCMCall call will invoke the callback if the HGCM
1313 * call is performed in an ASYNC fashion. This function can
1314 * deal with cancelled requests, so we let user more requests
1315 * be interruptible (should add a flag for this later I guess).
1316 */
1317 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1318 int rc;
1319 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1320#ifdef RT_ARCH_AMD64
1321 if (f32bit)
1322 {
1323 if (fInterruptible)
1324 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1325 else
1326 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1327 }
1328 else
1329#endif
1330 {
1331 if (fInterruptible)
1332 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1333 else
1334 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1335 }
1336 if (RT_SUCCESS(rc))
1337 {
1338 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1339 if (pcbDataReturned)
1340 *pcbDataReturned = cbActual;
1341 }
1342 else
1343 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1344 return rc;
1345}
1346
1347
1348/**
1349 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1350 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1351 */
1352static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1353{
1354 int rc;
1355 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1356
1357
1358 /*
1359 * If there is an old client, try disconnect it first.
1360 */
1361 if (pDevExt->u32ClipboardClientId != 0)
1362 {
1363 VBoxGuestHGCMDisconnectInfo Info;
1364 Info.result = VERR_WRONG_ORDER;
1365 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1366 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1367 if (RT_SUCCESS(rc))
1368 {
1369 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1370 return rc;
1371 }
1372 if (RT_FAILURE((int32_t)Info.result))
1373 {
1374 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1375 return Info.result;
1376 }
1377 pDevExt->u32ClipboardClientId = 0;
1378 }
1379
1380 /*
1381 * Try connect.
1382 */
1383 VBoxGuestHGCMConnectInfo Info;
1384 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1385 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1386 Info.u32ClientID = 0;
1387 Info.result = VERR_WRONG_ORDER;
1388
1389 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1390 if (RT_FAILURE(rc))
1391 {
1392 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1393 return rc;
1394 }
1395 if (RT_FAILURE(Info.result))
1396 {
1397 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1398 return rc;
1399 }
1400
1401 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1402
1403 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1404 *pu32ClientId = Info.u32ClientID;
1405 if (pcbDataReturned)
1406 *pcbDataReturned = sizeof(uint32_t);
1407
1408 return VINF_SUCCESS;
1409}
1410
1411#endif /* VBOX_WITH_HGCM */
1412
1413/**
1414 * Guest backdoor logging.
1415 *
1416 * @returns VBox status code.
1417 *
1418 * @param pch The log message (need not be NULL terminated).
1419 * @param cbData Size of the buffer.
1420 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1421 */
1422static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1423{
1424 Log(("%.*s", cbData, pch));
1425 if (pcbDataReturned)
1426 *pcbDataReturned = 0;
1427 return VINF_SUCCESS;
1428}
1429
1430
1431/**
1432 * Common IOCtl for user to kernel and kernel to kernel communcation.
1433 *
1434 * This function only does the basic validation and then invokes
1435 * worker functions that takes care of each specific function.
1436 *
1437 * @returns VBox status code.
1438 *
1439 * @param iFunction The requested function.
1440 * @param pDevExt The device extension.
1441 * @param pSession The client session.
1442 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1443 * @param cbData The max size of the data buffer.
1444 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1445 */
1446int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1447 void *pvData, size_t cbData, size_t *pcbDataReturned)
1448{
1449 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1450 iFunction, pDevExt, pSession, pvData, cbData));
1451
1452 /*
1453 * Make sure the returned data size is set to zero.
1454 */
1455 if (pcbDataReturned)
1456 *pcbDataReturned = 0;
1457
1458 /*
1459 * Define some helper macros to simplify validation.
1460 */
1461#define CHECKRET_RING0(mnemonic) \
1462 do { \
1463 if (pSession->R0Process != NIL_RTR0PROCESS) \
1464 { \
1465 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1466 pSession->Process, (uintptr_t)pSession->R0Process)); \
1467 return VERR_PERMISSION_DENIED; \
1468 } \
1469 } while (0)
1470#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1471 do { \
1472 if (cbData < (cbMin)) \
1473 { \
1474 Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1475 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1476 return VERR_BUFFER_OVERFLOW; \
1477 } \
1478 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1479 { \
1480 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1481 return VERR_INVALID_POINTER; \
1482 } \
1483 } while (0)
1484
1485
1486 /*
1487 * Deal with variably sized requests first.
1488 */
1489 int rc = VINF_SUCCESS;
1490 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
1491 {
1492 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1493 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1494 }
1495#ifdef VBOX_WITH_HGCM
1496 /*
1497 * These ones are a bit tricky.
1498 */
1499 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
1500 {
1501 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1502 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1503 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1504 fInterruptible, false /*f32bit*/,
1505 0, cbData, pcbDataReturned);
1506 }
1507 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
1508 {
1509 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1510 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1511 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1512 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1513 false /*f32bit*/,
1514 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1515 }
1516# ifdef RT_ARCH_AMD64
1517 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
1518 {
1519 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1520 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1521 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1522 fInterruptible, true /*f32bit*/,
1523 0, cbData, pcbDataReturned);
1524 }
1525 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
1526 {
1527 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1528 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1529 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1530 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1531 true /*f32bit*/,
1532 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1533 }
1534# endif
1535#endif /* VBOX_WITH_HGCM */
1536 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
1537 {
1538 CHECKRET_MIN_SIZE("LOG", 1);
1539 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
1540 }
1541 else
1542 {
1543 switch (iFunction)
1544 {
1545 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1546 CHECKRET_RING0("GETVMMDEVPORT");
1547 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1548 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1549 break;
1550
1551 case VBOXGUEST_IOCTL_WAITEVENT:
1552 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1553 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
1554 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
1555 break;
1556
1557 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
1558 if (cbData != 0)
1559 rc = VERR_INVALID_PARAMETER;
1560 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1561 break;
1562
1563 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1564 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1565 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1566 break;
1567
1568#ifdef VBOX_WITH_HGCM
1569 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1570# ifdef RT_ARCH_AMD64
1571 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
1572# endif
1573 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1574 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1575 break;
1576
1577 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1578# ifdef RT_ARCH_AMD64
1579 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
1580# endif
1581 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1582 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1583 break;
1584
1585 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1586 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1587 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1588 break;
1589#endif /* VBOX_WITH_HGCM */
1590
1591 default:
1592 {
1593 Log(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
1594 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
1595 rc = VERR_NOT_SUPPORTED;
1596 break;
1597 }
1598 }
1599 }
1600
1601 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1602 return rc;
1603}
1604
1605
1606
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        /* Writing the physical address of the ack request to the VMMDev
           request port makes the host fill in pReq->events synchronously. */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            /* Merge previously undelivered events; each waiter consumes the
               events it asked for so a single event wakes only one waiter. */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            /* Whatever nobody wanted stays pending for future waiters. */
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
1716
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette