VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 21498

Last change on this file since 21498 was 21498, checked in by vboxsync, 16 years ago

VBoxGuest.cpp: Implemented fixating the guest mappings. Also, clear capabilities and mask all events during unload.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 60.1 KB
Line 
1/* $Id: VBoxGuest.cpp 21498 2009-07-10 20:26:23Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23
24/*******************************************************************************
25* Header Files *
26*******************************************************************************/
27#define LOG_GROUP LOG_GROUP_DEFAULT
28#include "VBoxGuestInternal.h"
29#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
30#include <VBox/log.h>
31#include <iprt/mem.h>
32#include <iprt/time.h>
33#include <iprt/memobj.h>
34#include <iprt/asm.h>
35#include <iprt/string.h>
36#include <iprt/process.h>
37#include <iprt/assert.h>
38#include <iprt/param.h>
39#ifdef VBOX_WITH_HGCM
40# include <iprt/thread.h>
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52
53/**
54 * Reserves memory in which the VMM can relocate any guest mappings
55 * that are floating around.
56 *
57 * This operation is a little bit tricky since the VMM might not accept
58 * just any address because of address clashes between the three contexts
59 * it operates in, so use a small stack to perform this operation.
60 *
61 * @returns VBox status code (ignored).
62 * @param pDevExt The device extension.
63 */
64static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
65{
66 /*
67 * Query the required space.
68 */
69 VMMDevReqHypervisorInfo *pReq;
70 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
71 if (RT_FAILURE(rc))
72 return rc;
73 pReq->hypervisorStart = 0;
74 pReq->hypervisorSize = 0;
75 rc = VbglGRPerform(&pReq->header);
76 if (RT_FAILURE(rc)) /* this shouldn't happen! */
77 {
78 VbglGRFree(&pReq->header);
79 return rc;
80 }
81
82 /*
83 * The VMM will report back if there is nothing it wants to map, like for
84 * insance in VT-x and AMD-V mode.
85 */
86 if (pReq->hypervisorSize == 0)
87 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
88 else
89 {
90 /*
91 * We have to try several times since the host can be picky
92 * about certain addresses.
93 */
94 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
95 uint32_t cbHypervisor = pReq->hypervisorSize;
96 RTR0MEMOBJ ahTries[5];
97 uint32_t iTry;
98 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
99 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
100 {
101 /*
102 * Reserve space, or if that isn't supported, create a object for
103 * some fictive physical memory and map that in to kernel space.
104 *
105 * To make the code a bit uglier, most systems cannot help with
106 * 4MB alignment, so we have to deal with that in addition to
107 * having two ways of getting the memory.
108 */
109 uint32_t uAlignment = _4M;
110 RTR0MEMOBJ hObj;
111 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
112 if (rc == VERR_NOT_SUPPORTED)
113 {
114 uAlignment = PAGE_SIZE;
115 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
116 }
117 if (rc == VERR_NOT_SUPPORTED)
118 {
119 if (hFictive == NIL_RTR0MEMOBJ)
120 {
121 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M);
122 if (RT_FAILURE(rc))
123 break;
124 hFictive = hObj;
125 }
126 uAlignment = _4M;
127 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
128 if (rc == VERR_NOT_SUPPORTED)
129 {
130 uAlignment = PAGE_SIZE;
131 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
132 }
133 }
134 if (RT_FAILURE(rc))
135 {
136 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
137 rc, cbHypervisor, uAlignment, iTry));
138 break;
139 }
140
141 /*
142 * Try set it.
143 */
144 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
145 pReq->header.rc = VERR_INTERNAL_ERROR;
146 pReq->hypervisorSize = cbHypervisor;
147 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
148 if ( uAlignment == PAGE_SIZE
149 && pReq->hypervisorStart & (_4M - 1))
150 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
151 (pReq->hypervisorStart | (_4M - 1)) + 1;
152 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
153
154 rc = VbglGRPerform(&pReq->header);
155 if (RT_SUCCESS(rc))
156 {
157 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
158 Log(("vboxGuestInitFixateGuestMappings: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
159 RTR0MemObjAddress(pDevExt->hGuestMappings),
160 RTR0MemObjSize(pDevExt->hGuestMappings),
161 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
162 break;
163 }
164 ahTries[iTry] = hObj;
165 }
166
167 /*
168 * Cleanup failed attempts.
169 */
170 while (iTry-- > 0)
171 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
172 if ( RT_FAILURE(rc)
173 && hFictive != NIL_RTR0PTR)
174 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
175 }
176 VbglGRFree(&pReq->header);
177
178 /*
179 * We ignore failed attempts for now.
180 */
181 return VINF_SUCCESS;
182}
183
184
185/**
186 * Undo what vboxGuestInitFixateGuestMappings did.
187 *
188 * @param pDevExt The device extension.
189 */
190static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
191{
192 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
193 {
194 /*
195 * Tell the host that we're going to free the memory we reserved for
196 * it, the free it up. (Leak the memory if anything goes wrong here.)
197 */
198 VMMDevReqHypervisorInfo *pReq;
199 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
200 if (RT_SUCCESS(rc))
201 {
202 pReq->hypervisorStart = 0;
203 pReq->hypervisorSize = 0;
204 rc = VbglGRPerform(&pReq->header);
205 VbglGRFree(&pReq->header);
206 }
207 if (RT_SUCCESS(rc))
208 {
209 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
210 AssertRC(rc);
211 }
212 else
213 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
214
215 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
216 }
217}
218
219
220/**
221 * Sets the interrupt filter mask during initialization and termination.
222 *
223 * This will ASSUME that we're the ones in carge over the mask, so
224 * we'll simply clear all bits we don't set.
225 *
226 * @returns VBox status code (ignored).
227 * @param pDevExt The device extension.
228 * @param fMask The new mask.
229 */
230static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
231{
232 VMMDevCtlGuestFilterMask *pReq;
233 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
234 if (RT_SUCCESS(rc))
235 {
236 pReq->u32OrMask = fMask;
237 pReq->u32NotMask = ~fMask;
238 rc = VbglGRPerform(&pReq->header);
239 if ( RT_FAILURE(rc)
240 || RT_FAILURE(pReq->header.rc))
241 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
242 rc, pReq->header.rc));
243 VbglGRFree(&pReq->header);
244 }
245 return rc;
246}
247
248
249/**
250 * Report guest information to the VMMDev.
251 *
252 * @returns VBox status code.
253 * @param pDevExt The device extension.
254 * @param enmOSType The OS type to report.
255 */
256static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
257{
258 VMMDevReportGuestInfo *pReq;
259 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
260 if (RT_SUCCESS(rc))
261 {
262 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
263 pReq->guestInfo.osType = enmOSType;
264 rc = VbglGRPerform(&pReq->header);
265 if ( RT_FAILURE(rc)
266 || RT_FAILURE(pReq->header.rc))
267 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
268 rc, pReq->header.rc));
269 VbglGRFree(&pReq->header);
270 }
271 return rc;
272}
273
274
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    /* HGCM events must always stay enabled when HGCM support is compiled in. */
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->u32MousePosChangedSeq = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        /* Only the event spinlock may have been created at this point. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledgement request so the ISR never has to allocate. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }
                }
            }

            /* failure cleanup */
            /* NOTE(review): pIrqAckEvents is not explicitly freed here - presumably
               VbglTerminate below reclaims the whole phys heap; verify. */
        }
        else
            Log(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        Log(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
411
412
413/**
414 * Deletes all the items in a wait chain.
415 * @param pWait The head of the chain.
416 */
417static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
418{
419 while (pList->pHead)
420 {
421 int rc2;
422 PVBOXGUESTWAIT pWait = pList->pHead;
423 pList->pHead = pWait->pNext;
424
425 pWait->pNext = NULL;
426 pWait->pPrev = NULL;
427 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
428 pWait->Event = NIL_RTSEMEVENTMULTI;
429 pWait->pSession = NULL;
430 RTMemFree(pWait);
431 }
432 pList->pHead = NULL;
433 pList->pTail = NULL;
434}
435
436
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    LogRel(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Unfix the guest mappings, filter all events and clear
     * all capabilities.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX);   /* clears every capability bit */
    vboxGuestSetFilterMask(pDevExt, 0);             /* masks all events */

    /*
     * Cleanup resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

    /* Free every wait entry in all three chains (active, HGCM, recycled). */
    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
478
479
480/**
481 * Creates a VBoxGuest user session.
482 *
483 * The native code calls this when a ring-3 client opens the device.
484 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
485 *
486 * @returns VBox status code.
487 * @param pDevExt The device extension.
488 * @param ppSession Where to store the session on success.
489 */
490int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
491{
492 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
493 if (RT_UNLIKELY(!pSession))
494 {
495 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
496 return VERR_NO_MEMORY;
497 }
498
499 pSession->Process = RTProcSelf();
500 pSession->R0Process = RTR0ProcHandleSelf();
501 pSession->pDevExt = pDevExt;
502
503 *ppSession = pSession;
504 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
505 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
506 return VINF_SUCCESS;
507}
508
509
510/**
511 * Creates a VBoxGuest kernel session.
512 *
513 * The native code calls this when a ring-0 client connects to the device.
514 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
515 *
516 * @returns VBox status code.
517 * @param pDevExt The device extension.
518 * @param ppSession Where to store the session on success.
519 */
520int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
521{
522 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
523 if (RT_UNLIKELY(!pSession))
524 {
525 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
526 return VERR_NO_MEMORY;
527 }
528
529 pSession->Process = NIL_RTPROCESS;
530 pSession->R0Process = NIL_RTR0PROCESS;
531 pSession->pDevExt = pDevExt;
532
533 *ppSession = pSession;
534 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
535 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
536 return VINF_SUCCESS;
537}
538
539
540
541/**
542 * Closes a VBoxGuest session.
543 *
544 * @param pDevExt The device extension.
545 * @param pSession The session to close (and free).
546 */
547void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
548{
549 unsigned i; NOREF(i);
550 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
551 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
552
553#ifdef VBOX_WITH_HGCM
554 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
555 if (pSession->aHGCMClientIds[i])
556 {
557 VBoxGuestHGCMDisconnectInfo Info;
558 Info.result = 0;
559 Info.u32ClientID = pSession->aHGCMClientIds[i];
560 pSession->aHGCMClientIds[i] = 0;
561 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
562 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
563 }
564#endif
565
566 pSession->pDevExt = NULL;
567 pSession->Process = NIL_RTPROCESS;
568 pSession->R0Process = NIL_RTR0PROCESS;
569 RTMemFree(pSession);
570}
571
572
573/**
574 * Links the wait-for-event entry into the tail of the given list.
575 *
576 * @param pList The list to link it into.
577 * @param pWait The wait for event entry to append.
578 */
579DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
580{
581 const PVBOXGUESTWAIT pTail = pList->pTail;
582 pWait->pNext = NULL;
583 pWait->pPrev = pTail;
584 if (pTail)
585 pTail->pNext = pWait;
586 else
587 pList->pHead = pWait;
588 pList->pTail = pWait;
589}
590
591
592/**
593 * Unlinks the wait-for-event entry.
594 *
595 * @param pList The list to unlink it from.
596 * @param pWait The wait for event entry to unlink.
597 */
598DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
599{
600 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
601 const PVBOXGUESTWAIT pNext = pWait->pNext;
602 if (pNext)
603 pNext->pPrev = pPrev;
604 else
605 pList->pTail = pPrev;
606 if (pPrev)
607 pPrev->pNext = pNext;
608 else
609 pList->pHead = pNext;
610}
611
612
/**
 * Allocates a wait-for-event entry.
 *
 * Tries to recycle an entry from the free list first (taken under the event
 * spinlock); only falls back to a fresh allocation plus semaphore creation
 * when the free list is empty.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek first so we skip the spinlock when the list looks empty. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        /* Re-check under the lock; the entry may have been taken meanwhile. */
        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        static unsigned s_cErrors = 0;  /* throttles release logging to 32 entries */
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Recycled entries may still have a signalled semaphore - reset it. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
674
675
676/**
677 * Frees the wait-for-event entry.
678 * The caller must own the wait spinlock!
679 *
680 * @param pDevExt The device extension.
681 * @param pWait The wait-for-event entry to free.
682 */
683static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
684{
685 pWait->fReqEvents = 0;
686 pWait->fResEvents = 0;
687#ifdef VBOX_WITH_HGCM
688 pWait->pHGCMReq = NULL;
689#endif
690 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
691}
692
693
694/**
695 * Frees the wait-for-event entry.
696 *
697 * @param pDevExt The device extension.
698 * @param pWait The wait-for-event entry to free.
699 */
700static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
701{
702 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
703 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
704 VBoxGuestWaitFreeLocked(pDevExt, pWait);
705 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
706}
707
708
709/**
710 * Modifies the guest capabilities.
711 *
712 * Should be called during driver init and termination.
713 *
714 * @returns VBox status code.
715 * @param fOr The Or mask (what to enable).
716 * @param fNot The Not mask (what to disable).
717 */
718int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
719{
720 VMMDevReqGuestCapabilities2 *pReq;
721 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
722 if (RT_FAILURE(rc))
723 {
724 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
725 sizeof(*pReq), sizeof(*pReq), rc));
726 return rc;
727 }
728
729 pReq->u32OrMask = fOr;
730 pReq->u32NotMask = fNot;
731
732 rc = VbglGRPerform(&pReq->header);
733 if (RT_FAILURE(rc))
734 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
735 else if (RT_FAILURE(pReq->header.rc))
736 {
737 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
738 rc = pReq->header.rc;
739 }
740
741 VbglGRFree(&pReq->header);
742 return rc;
743}
744
745
746/**
747 * Implements the fast (no input or output) type of IOCtls.
748 *
749 * This is currently just a placeholder stub inherited from the support driver code.
750 *
751 * @returns VBox status code.
752 * @param iFunction The IOCtl function number.
753 * @param pDevExt The device extension.
754 * @param pSession The session.
755 */
756int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
757{
758 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
759
760 return VERR_NOT_SUPPORTED;
761}
762
763
764
765static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
766{
767 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
768 pInfo->portAddress = pDevExt->IOPortBase;
769 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
770 if (pcbDataReturned)
771 *pcbDataReturned = sizeof(*pInfo);
772 return VINF_SUCCESS;
773}
774
775
776/**
777 * Worker VBoxGuestCommonIOCtl_WaitEvent.
778 * The caller enters the spinlock, we may or may not leave it.
779 *
780 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
781 */
782DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
783 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
784{
785 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
786 if (fMatches)
787 {
788 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
789 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
790
791 pInfo->u32EventFlagsOut = fMatches;
792 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
793 if (fReqEvents & ~((uint32_t)1 << iEvent))
794 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
795 else
796 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
797 return VINF_SUCCESS;
798 }
799 return VERR_TIMEOUT;
800}
801
802
803static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
804 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
805{
806 pInfo->u32EventFlagsOut = 0;
807 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
808 if (pcbDataReturned)
809 *pcbDataReturned = sizeof(*pInfo);
810
811 /*
812 * Copy and verify the input mask.
813 */
814 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
815 int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
816 if (RT_UNLIKELY(iEvent < 0))
817 {
818 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
819 return VERR_INVALID_PARAMETER;
820 }
821
822 /*
823 * Check the condition up front, before doing the wait-for-event allocations.
824 */
825 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
826 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
827 int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
828 if (rc == VINF_SUCCESS)
829 return rc;
830 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
831
832 if (!pInfo->u32TimeoutIn)
833 {
834 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
835 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
836 return VERR_TIMEOUT;
837 }
838
839 PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
840 if (!pWait)
841 return VERR_NO_MEMORY;
842 pWait->fReqEvents = fReqEvents;
843
844 /*
845 * We've got the wait entry now, re-enter the spinlock and check for the condition.
846 * If the wait condition is met, return.
847 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
848 */
849 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
850 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
851 if (rc == VINF_SUCCESS)
852 {
853 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
854 return rc;
855 }
856 VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
857 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
858
859 if (fInterruptible)
860 rc = RTSemEventMultiWaitNoResume(pWait->Event,
861 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
862 else
863 rc = RTSemEventMultiWait(pWait->Event,
864 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
865
866 /*
867 * There is one special case here and that's when the semaphore is
868 * destroyed upon device driver unload. This shouldn't happen of course,
869 * but in case it does, just get out of here ASAP.
870 */
871 if (rc == VERR_SEM_DESTROYED)
872 return rc;
873
874 /*
875 * Unlink the wait item and dispose of it.
876 */
877 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
878 VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
879 const uint32_t fResEvents = pWait->fResEvents;
880 VBoxGuestWaitFreeLocked(pDevExt, pWait);
881 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
882
883 /*
884 * Now deal with the return code.
885 */
886 if ( fResEvents
887 && fResEvents != UINT32_MAX)
888 {
889 pInfo->u32EventFlagsOut = fResEvents;
890 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
891 if (fReqEvents & ~((uint32_t)1 << iEvent))
892 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
893 else
894 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
895 rc = VINF_SUCCESS;
896 }
897 else if ( fResEvents == UINT32_MAX
898 || rc == VERR_INTERRUPTED)
899 {
900 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
901 rc == VERR_INTERRUPTED;
902 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
903 }
904 else if (rc == VERR_TIMEOUT)
905 {
906 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
907 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
908 }
909 else
910 {
911 if (RT_SUCCESS(rc))
912 {
913 static unsigned s_cErrors = 0;
914 if (s_cErrors++ < 32)
915 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
916 rc = VERR_INTERNAL_ERROR;
917 }
918 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
919 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
920 }
921
922 return rc;
923}
924
925
926static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
927{
928 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
929 PVBOXGUESTWAIT pWait;
930 int rc = 0;
931
932 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
933
934 /*
935 * Walk the event list and wake up anyone with a matching session.
936 */
937 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
938 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
939 if (pWait->pSession == pSession)
940 {
941 pWait->fResEvents = UINT32_MAX;
942 rc |= RTSemEventMultiSignal(pWait->Event);
943 }
944 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
945 Assert(rc == 0);
946
947 return VINF_SUCCESS;
948}
949
950
/**
 * Handles the VMMREQUEST ioctl: forwards a caller supplied VMMDev request
 * to the host and copies the result back.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The caller's request buffer (header + payload).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        /* The header claims a size larger than the buffer that was passed in. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    int rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        /* Async HGCM execution must not come through this path. */
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly modified) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the request itself failed on the host. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1017
1018
1019static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1020{
1021 VMMDevCtlGuestFilterMask *pReq;
1022 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1023 if (RT_FAILURE(rc))
1024 {
1025 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1026 sizeof(*pReq), sizeof(*pReq), rc));
1027 return rc;
1028 }
1029
1030 pReq->u32OrMask = pInfo->u32OrMask;
1031 pReq->u32NotMask = pInfo->u32NotMask;
1032 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1033 rc = VbglGRPerform(&pReq->header);
1034 if (RT_FAILURE(rc))
1035 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1036 else if (RT_FAILURE(pReq->header.rc))
1037 {
1038 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1039 rc = pReq->header.rc;
1040 }
1041
1042 VbglGRFree(&pReq->header);
1043 return rc;
1044}
1045
1046#ifdef VBOX_WITH_HGCM
1047
1048AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1049
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks the calling thread until the host marks the HGCM request done
 * (VBOX_HGCM_REQ_DONE), the timeout expires, or - in the interruptible
 * case - the wait is interrupted.
 *
 * @param   pHdr            The HGCM request header to wait upon.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted; also controls
 *                          the behavior of the allocation poll loop below.
 * @param   cMillies        Timeout in milliseconds, RT_INDEFINITE_WAIT for none.
 */
static void VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                 bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        /* The DONE flag is set by the host; check it under the event spinlock. */
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        /* Out of wait entries: interruptible callers may return early;
           otherwise back off briefly and retry the allocation. */
        if (fInterruptible)
            return;
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);

    /*
     * This shouldn't ever return failure...
     * Unlink, free and return.
     */
    /* NOTE(review): on VERR_SEM_DESTROYED the entry is neither unlinked nor
       freed here - presumably the teardown path owns it then; verify. */
    if (rc == VERR_SEM_DESTROYED)
        return;
    if (RT_FAILURE(rc))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
}
1119
1120
1121/**
1122 * This is a callback for dealing with async waits.
1123 *
1124 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1125 */
1126static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1127{
1128 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1129 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1130 VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1131 pDevExt,
1132 false /* fInterruptible */,
1133 u32User /* cMillies */);
1134}
1135
1136
1137/**
1138 * This is a callback for dealing with async waits with a timeout.
1139 *
1140 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1141 */
1142static DECLCALLBACK(void) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1143 void *pvUser, uint32_t u32User)
1144{
1145 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1146 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1147 VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1148 pDevExt,
1149 true /* fInterruptible */,
1150 u32User /* cMillies */ );
1151}
1152
1153
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT: connect to an HGCM service and record
 * the client id in the session's client id table.
 *
 * @returns VBox status code; VERR_TOO_MANY_OPEN_FILES if the session's client
 *          id table is full (the new connection is then disconnected again).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session; owns the client id table.
 * @param   pInfo               The connect information (in/out).
 * @param   pcbDataReturned     Where to store the amount of returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])   /* 0 marks a free slot. */
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: log (rate limited to the first 32 occurrences)
                   and undo the connection before failing. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1205
1206
1207static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1208 size_t *pcbDataReturned)
1209{
1210 /*
1211 * Validate the client id and invalidate its entry while we're in the call.
1212 */
1213 const uint32_t u32ClientId = pInfo->u32ClientID;
1214 unsigned i;
1215 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1216 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1217 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1218 if (pSession->aHGCMClientIds[i] == u32ClientId)
1219 {
1220 pSession->aHGCMClientIds[i] = UINT32_MAX;
1221 break;
1222 }
1223 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1224 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1225 {
1226 static unsigned s_cErrors = 0;
1227 if (s_cErrors++ > 32)
1228 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1229 return VERR_INVALID_HANDLE;
1230 }
1231
1232 /*
1233 * The VbglHGCMConnect call will invoke the callback if the HGCM
1234 * call is performed in an ASYNC fashion. The function is not able
1235 * to deal with cancelled requests.
1236 */
1237 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1238 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1239 if (RT_SUCCESS(rc))
1240 {
1241 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1242 if (pcbDataReturned)
1243 *pcbDataReturned = sizeof(*pInfo);
1244 }
1245
1246 /* Update the client id array according to the result. */
1247 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1248 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1249 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1250 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1251
1252 return rc;
1253}
1254
1255
1256static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1257 PVBOXGUESTSESSION pSession,
1258 VBoxGuestHGCMCallInfo *pInfo,
1259 uint32_t cMillies, bool fInterruptible, bool f32bit,
1260 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1261{
1262 /*
1263 * Some more validations.
1264 */
1265 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1266 {
1267 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1268 return VERR_INVALID_PARAMETER;
1269 }
1270 size_t cbActual = cbExtra + sizeof(*pInfo);
1271#ifdef RT_ARCH_AMD64
1272 if (f32bit)
1273 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1274 else
1275#endif
1276 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1277 if (cbData < cbActual)
1278 {
1279 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1280 cbData, cbActual));
1281 return VERR_INVALID_PARAMETER;
1282 }
1283
1284 /*
1285 * Validate the client id.
1286 */
1287 const uint32_t u32ClientId = pInfo->u32ClientID;
1288 unsigned i;
1289 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1290 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1291 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1292 if (pSession->aHGCMClientIds[i] == u32ClientId)
1293 break;
1294 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1295 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1296 {
1297 static unsigned s_cErrors = 0;
1298 if (s_cErrors++ > 32)
1299 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1300 return VERR_INVALID_HANDLE;
1301 }
1302
1303 /*
1304 * The VbglHGCMCall call will invoke the callback if the HGCM
1305 * call is performed in an ASYNC fashion. This function can
1306 * deal with cancelled requests, so we let user more requests
1307 * be interruptible (should add a flag for this later I guess).
1308 */
1309 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1310 int rc;
1311 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1312#ifdef RT_ARCH_AMD64
1313 if (f32bit)
1314 {
1315 if (fInterruptible)
1316 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1317 else
1318 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1319 }
1320 else
1321#endif
1322 {
1323 if (fInterruptible)
1324 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1325 else
1326 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1327 }
1328 if (RT_SUCCESS(rc))
1329 {
1330 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1331 if (pcbDataReturned)
1332 *pcbDataReturned = cbActual;
1333 }
1334 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: Failed. rc=%Rrc.\n", rc));
1335 return rc;
1336}
1337
1338
1339/**
1340 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1341 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1342 */
1343static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1344{
1345 int rc;
1346 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1347
1348
1349 /*
1350 * If there is an old client, try disconnect it first.
1351 */
1352 if (pDevExt->u32ClipboardClientId != 0)
1353 {
1354 VBoxGuestHGCMDisconnectInfo Info;
1355 Info.result = VERR_WRONG_ORDER;
1356 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1357 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1358 if (RT_SUCCESS(rc))
1359 {
1360 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1361 return rc;
1362 }
1363 if (RT_FAILURE((int32_t)Info.result))
1364 {
1365 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1366 return Info.result;
1367 }
1368 pDevExt->u32ClipboardClientId = 0;
1369 }
1370
1371 /*
1372 * Try connect.
1373 */
1374 VBoxGuestHGCMConnectInfo Info;
1375 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1376 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1377 Info.u32ClientID = 0;
1378 Info.result = VERR_WRONG_ORDER;
1379
1380 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1381 if (RT_FAILURE(rc))
1382 {
1383 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1384 return rc;
1385 }
1386 if (RT_FAILURE(Info.result))
1387 {
1388 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1389 return rc;
1390 }
1391
1392 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1393
1394 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1395 *pu32ClientId = Info.u32ClientID;
1396 if (pcbDataReturned)
1397 *pcbDataReturned = sizeof(uint32_t);
1398
1399 return VINF_SUCCESS;
1400}
1401
1402#endif /* VBOX_WITH_HGCM */
1403
1404/**
1405 * Guest backdoor logging.
1406 *
1407 * @returns VBox status code.
1408 *
1409 * @param pch The log message (need not be NULL terminated).
1410 * @param cbData Size of the buffer.
1411 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1412 */
1413static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1414{
1415 Log(("%.*s", cbData, pch));
1416 if (pcbDataReturned)
1417 *pcbDataReturned = 0;
1418 return VINF_SUCCESS;
1419}
1420
1421
1422/**
1423 * Common IOCtl for user to kernel and kernel to kernel communcation.
1424 *
1425 * This function only does the basic validation and then invokes
1426 * worker functions that takes care of each specific function.
1427 *
1428 * @returns VBox status code.
1429 *
1430 * @param iFunction The requested function.
1431 * @param pDevExt The device extension.
1432 * @param pSession The client session.
1433 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1434 * @param cbData The max size of the data buffer.
1435 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1436 */
1437int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1438 void *pvData, size_t cbData, size_t *pcbDataReturned)
1439{
1440 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1441 iFunction, pDevExt, pSession, pvData, cbData));
1442
1443 /*
1444 * Make sure the returned data size is set to zero.
1445 */
1446 if (pcbDataReturned)
1447 *pcbDataReturned = 0;
1448
1449 /*
1450 * Define some helper macros to simplify validation.
1451 */
1452#define CHECKRET_RING0(mnemonic) \
1453 do { \
1454 if (pSession->R0Process != NIL_RTR0PROCESS) \
1455 { \
1456 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1457 pSession->Process, (uintptr_t)pSession->R0Process)); \
1458 return VERR_PERMISSION_DENIED; \
1459 } \
1460 } while (0)
1461#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1462 do { \
1463 if (cbData < (cbMin)) \
1464 { \
1465 Log(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
1466 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
1467 return VERR_BUFFER_OVERFLOW; \
1468 } \
1469 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
1470 { \
1471 Log(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
1472 return VERR_INVALID_POINTER; \
1473 } \
1474 } while (0)
1475
1476
1477 /*
1478 * Deal with variably sized requests first.
1479 */
1480 int rc = VINF_SUCCESS;
1481 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
1482 {
1483 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
1484 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
1485 }
1486#ifdef VBOX_WITH_HGCM
1487 /*
1488 * These ones are a bit tricky.
1489 */
1490 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
1491 {
1492 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1493 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1494 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1495 fInterruptible, false /*f32bit*/,
1496 0, cbData, pcbDataReturned);
1497 }
1498 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
1499 {
1500 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1501 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1502 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1503 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1504 false /*f32bit*/,
1505 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1506 }
1507# ifdef RT_ARCH_AMD64
1508 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
1509 {
1510 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
1511 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
1512 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
1513 fInterruptible, true /*f32bit*/,
1514 0, cbData, pcbDataReturned);
1515 }
1516 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
1517 {
1518 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
1519 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
1520 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
1521 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
1522 true /*f32bit*/,
1523 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
1524 }
1525# endif
1526#endif /* VBOX_WITH_HGCM */
1527 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
1528 {
1529 CHECKRET_MIN_SIZE("LOG", 1);
1530 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
1531 }
1532 else
1533 {
1534 switch (iFunction)
1535 {
1536 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
1537 CHECKRET_RING0("GETVMMDEVPORT");
1538 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
1539 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
1540 break;
1541
1542 case VBOXGUEST_IOCTL_WAITEVENT:
1543 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
1544 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
1545 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
1546 break;
1547
1548 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
1549 if (cbData != 0)
1550 rc = VERR_INVALID_PARAMETER;
1551 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1552 break;
1553
1554 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
1555 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
1556 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
1557 break;
1558
1559#ifdef VBOX_WITH_HGCM
1560 case VBOXGUEST_IOCTL_HGCM_CONNECT:
1561# ifdef RT_ARCH_AMD64
1562 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
1563# endif
1564 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
1565 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
1566 break;
1567
1568 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
1569# ifdef RT_ARCH_AMD64
1570 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
1571# endif
1572 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
1573 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
1574 break;
1575
1576 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
1577 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
1578 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
1579 break;
1580#endif /* VBOX_WITH_HGCM */
1581
1582 default:
1583 {
1584 Log(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
1585 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
1586 rc = VERR_NOT_SUPPORTED;
1587 break;
1588 }
1589 }
1590 }
1591
1592 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
1593 return rc;
1594}
1595
1596
1597
1598/**
1599 * Common interrupt service routine.
1600 *
1601 * This deals with events and with waking up thread waiting for those events.
1602 *
1603 * @returns true if it was our interrupt, false if it wasn't.
1604 * @param pDevExt The VBoxGuest device extension.
1605 */
1606bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
1607{
1608 bool fMousePositionChanged = false;
1609 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1610 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
1611 int rc = 0;
1612 bool fOurIrq;
1613
1614 /*
1615 * Make sure we've initalized the device extension.
1616 */
1617 if (RT_UNLIKELY(!pReq))
1618 return false;
1619
1620 /*
1621 * Enter the spinlock and check if it's our IRQ or not.
1622 */
1623 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1624 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
1625 if (fOurIrq)
1626 {
1627 /*
1628 * Acknowlegde events.
1629 * We don't use VbglGRPerform here as it may take another spinlocks.
1630 */
1631 pReq->header.rc = VERR_INTERNAL_ERROR;
1632 pReq->events = 0;
1633 ASMCompilerBarrier();
1634 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
1635 ASMCompilerBarrier(); /* paranoia */
1636 if (RT_SUCCESS(pReq->header.rc))
1637 {
1638 uint32_t fEvents = pReq->events;
1639 PVBOXGUESTWAIT pWait;
1640
1641 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
1642
1643 /*
1644 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
1645 */
1646 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
1647 {
1648 fMousePositionChanged = true;
1649 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
1650 }
1651
1652#ifdef VBOX_WITH_HGCM
1653 /*
1654 * The HGCM event/list is kind of different in that we evaluate all entries.
1655 */
1656 if (fEvents & VMMDEV_EVENT_HGCM)
1657 {
1658 for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
1659 if ( !pWait->fResEvents
1660 && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
1661 {
1662 pWait->fResEvents = VMMDEV_EVENT_HGCM;
1663 rc |= RTSemEventMultiSignal(pWait->Event);
1664 }
1665 fEvents &= ~VMMDEV_EVENT_HGCM;
1666 }
1667#endif
1668
1669 /*
1670 * Normal FIFO waiter evaluation.
1671 */
1672 fEvents |= pDevExt->f32PendingEvents;
1673 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1674 if ( (pWait->fReqEvents & fEvents)
1675 && !pWait->fResEvents)
1676 {
1677 pWait->fResEvents = pWait->fReqEvents & fEvents;
1678 fEvents &= ~pWait->fResEvents;
1679 rc |= RTSemEventMultiSignal(pWait->Event);
1680 if (!fEvents)
1681 break;
1682 }
1683 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
1684 }
1685 else /* something is serious wrong... */
1686 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
1687 pReq->header.rc, pReq->events));
1688 }
1689 else
1690 LogFlow(("VBoxGuestCommonISR: not ours\n"));
1691
1692 /*
1693 * Work the poll and async notification queues on OSes that implements that.
1694 * Do this outside the spinlock to prevent some recursive spinlocking.
1695 */
1696 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1697
1698 if (fMousePositionChanged)
1699 {
1700 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
1701 VBoxGuestNativeISRMousePollEvent(pDevExt);
1702 }
1703
1704 Assert(rc == 0);
1705 return fOurIrq;
1706}
1707
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette