VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@54613

Last change on this file since 54613 was 54613, checked in by vboxsync, 10 years ago

VBoxGuest: Optimized vbgdBitUsageTrackerChange and got rid of some unnecessary member type prefixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 132.6 KB
1/* $Id: VBoxGuest.cpp 54613 2015-03-03 23:00:35Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
34#include <VBox/log.h>
35#include <iprt/mem.h>
36#include <iprt/time.h>
37#include <iprt/memobj.h>
38#include <iprt/asm.h>
39#include <iprt/asm-amd64-x86.h>
40#include <iprt/string.h>
41#include <iprt/process.h>
42#include <iprt/assert.h>
43#include <iprt/param.h>
44#include <iprt/timer.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
65#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
66
67
68/*******************************************************************************
69* Structures and Typedefs *
70*******************************************************************************/
71/** Host flags to be updated by a given invocation of the
72 * vboxGuestUpdateHostFlags() method. */
73/** @todo r=bird: Use RT_BIT_32 for the bits, preferably replace enum with
74 * \#define. */
75enum
76{
77 HostFlags_FilterMask = 1,
78 HostFlags_MouseStatus = 4,
79 HostFlags_All = 7,
80 HostFlags_SizeHack = (unsigned)-1
81};
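/* A sketch of what the @todo above might look like if the enum were replaced with
 * RT_BIT_32-based defines (illustrative only, not part of the original file; the
 * names are made up here and RT_BIT_32/UINT32_C come from IPRT/standard headers):
 *
 *     #define VBGD_HOSTFLAGS_FILTER_MASK    RT_BIT_32(0)   // == 1
 *     #define VBGD_HOSTFLAGS_MOUSE_STATUS   RT_BIT_32(2)   // == 4
 *     #define VBGD_HOSTFLAGS_ALL            UINT32_C(7)    // every host flag bit
 */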
82
83
84/*******************************************************************************
85* Internal Functions *
86*******************************************************************************/
87#ifdef VBOX_WITH_HGCM
88static DECLCALLBACK(int) vbgdHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
89#endif
90static int vbgdIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
91static void vbgdBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
92static uint32_t vbgdGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
93static int vbgdResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
94static int vbgdResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
95static int vbgdResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
96static int vbgdSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
97 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
98static int vbgdSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
99 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
100static int vbgdSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
101 uint32_t fOrMask, uint32_t fNoMask, bool fSessionTermination);
102static int vbgdAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask,
103 uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags, bool fSessionTermination);
104static int vbgdDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
105
106
107/*******************************************************************************
108* Global Variables *
109*******************************************************************************/
110static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
111
112#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
113/**
114 * Drag in the rest of IPRT since we share it with the
115 * rest of the kernel modules on Solaris.
116 */
117PFNRT g_apfnVBoxGuestIPRTDeps[] =
118{
119 /* VirtioNet */
120 (PFNRT)RTRandBytes,
121 /* RTSemMutex* */
122 (PFNRT)RTSemMutexCreate,
123 (PFNRT)RTSemMutexDestroy,
124 (PFNRT)RTSemMutexRequest,
125 (PFNRT)RTSemMutexRequestNoResume,
126 (PFNRT)RTSemMutexRequestDebug,
127 (PFNRT)RTSemMutexRequestNoResumeDebug,
128 (PFNRT)RTSemMutexRelease,
129 (PFNRT)RTSemMutexIsOwned,
130 NULL
131};
132#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
133
134
135/**
136 * Reserves memory in which the VMM can relocate any guest mappings
137 * that are floating around.
138 *
139 * This operation is a little bit tricky since the VMM might not accept
140 * just any address because of address clashes between the three contexts
141 * it operates in, so use a small stack to perform this operation.
142 *
143 * @returns VBox status code (ignored).
144 * @param pDevExt The device extension.
145 */
146static int vbgdInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
147{
148 /*
149 * Query the required space.
150 */
151 VMMDevReqHypervisorInfo *pReq;
152 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
153 if (RT_FAILURE(rc))
154 return rc;
155 pReq->hypervisorStart = 0;
156 pReq->hypervisorSize = 0;
157 rc = VbglGRPerform(&pReq->header);
158 if (RT_FAILURE(rc)) /* this shouldn't happen! */
159 {
160 VbglGRFree(&pReq->header);
161 return rc;
162 }
163
164 /*
165 * The VMM will report back if there is nothing it wants to map, like for
166 * instance in VT-x and AMD-V mode.
167 */
168 if (pReq->hypervisorSize == 0)
169 Log(("vbgdInitFixateGuestMappings: nothing to do\n"));
170 else
171 {
172 /*
173 * We have to try several times since the host can be picky
174 * about certain addresses.
175 */
176 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
177 uint32_t cbHypervisor = pReq->hypervisorSize;
178 RTR0MEMOBJ ahTries[5];
179 uint32_t iTry;
180 bool fBitched = false;
181 Log(("vbgdInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
182 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
183 {
184 /*
185 * Reserve space, or if that isn't supported, create an object for
186 * some fictive physical memory and map that into kernel space.
187 *
188 * To make the code a bit uglier, most systems cannot help with
189 * 4MB alignment, so we have to deal with that in addition to
190 * having two ways of getting the memory.
191 */
192 uint32_t uAlignment = _4M;
193 RTR0MEMOBJ hObj;
194 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
195 if (rc == VERR_NOT_SUPPORTED)
196 {
197 uAlignment = PAGE_SIZE;
198 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
199 }
200 /*
201 * If both RTR0MemObjReserveKernel calls above failed because the operation is either not
202 * supported or not implemented at all on the current platform, try to map the memory object into the
203 * virtual kernel space.
204 */
205 if (rc == VERR_NOT_SUPPORTED)
206 {
207 if (hFictive == NIL_RTR0MEMOBJ)
208 {
209 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
210 if (RT_FAILURE(rc))
211 break;
212 hFictive = hObj;
213 }
214 uAlignment = _4M;
215 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
216 if (rc == VERR_NOT_SUPPORTED)
217 {
218 uAlignment = PAGE_SIZE;
219 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
220 }
221 }
222 if (RT_FAILURE(rc))
223 {
224 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
225 rc, cbHypervisor, uAlignment, iTry));
226 fBitched = true;
227 break;
228 }
229
230 /*
231 * Try to set it.
232 */
233 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
234 pReq->header.rc = VERR_INTERNAL_ERROR;
235 pReq->hypervisorSize = cbHypervisor;
236 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
237 if ( uAlignment == PAGE_SIZE
238 && pReq->hypervisorStart & (_4M - 1))
239 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
240 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
241
242 rc = VbglGRPerform(&pReq->header);
243 if (RT_SUCCESS(rc))
244 {
245 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
246 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
247 RTR0MemObjAddress(pDevExt->hGuestMappings),
248 RTR0MemObjSize(pDevExt->hGuestMappings),
249 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
250 break;
251 }
252 ahTries[iTry] = hObj;
253 }
254
255 /*
256 * Cleanup failed attempts.
257 */
258 while (iTry-- > 0)
259 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
260 if ( RT_FAILURE(rc)
261 && hFictive != NIL_RTR0PTR)
262 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
263 if (RT_FAILURE(rc) && !fBitched)
264 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
265 }
266 VbglGRFree(&pReq->header);
267
268 /*
269 * We ignore failed attempts for now.
270 */
271 return VINF_SUCCESS;
272}
273
274
275/**
276 * Undo what vbgdInitFixateGuestMappings did.
277 *
278 * @param pDevExt The device extension.
279 */
280static void vbgdTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
281{
282 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
283 {
284 /*
285 * Tell the host that we're going to free the memory we reserved for
286 * it, then free it up. (Leak the memory if anything goes wrong here.)
287 */
288 VMMDevReqHypervisorInfo *pReq;
289 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
290 if (RT_SUCCESS(rc))
291 {
292 pReq->hypervisorStart = 0;
293 pReq->hypervisorSize = 0;
294 rc = VbglGRPerform(&pReq->header);
295 VbglGRFree(&pReq->header);
296 }
297 if (RT_SUCCESS(rc))
298 {
299 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
300 AssertRC(rc);
301 }
302 else
303 LogRel(("vbgdTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
304
305 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
306 }
307}
308
309
310
311/**
312 * Report the guest information to the host.
313 *
314 * @returns IPRT status code.
315 * @param enmOSType The OS type to report.
316 */
317static int vbgdReportGuestInfo(VBOXOSTYPE enmOSType)
318{
319 /*
320 * Allocate and fill in the two guest info reports.
321 */
322 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
323 VMMDevReportGuestInfo *pReqInfo1 = NULL;
324 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
325 Log(("vbgdReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
326 if (RT_SUCCESS(rc))
327 {
328 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
329 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
330 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
331 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
332 pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
333 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
334
335 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
336 Log(("vbgdReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
337 if (RT_SUCCESS(rc))
338 {
339 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
340 pReqInfo1->guestInfo.osType = enmOSType;
341
342 /*
343 * There are two protocols here:
344 * 1. Info2 + Info1. Supported by >=3.2.51.
345 * 2. Info1 and optionally Info2. The old protocol.
346 *
347 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
348 * if not supported by the VMMDev (message ordering requirement).
349 */
350 rc = VbglGRPerform(&pReqInfo2->header);
351 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
352 if (RT_SUCCESS(rc))
353 {
354 rc = VbglGRPerform(&pReqInfo1->header);
355 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
356 }
357 else if ( rc == VERR_NOT_SUPPORTED
358 || rc == VERR_NOT_IMPLEMENTED)
359 {
360 rc = VbglGRPerform(&pReqInfo1->header);
361 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
362 if (RT_SUCCESS(rc))
363 {
364 rc = VbglGRPerform(&pReqInfo2->header);
365 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
366 if (rc == VERR_NOT_IMPLEMENTED)
367 rc = VINF_SUCCESS;
368 }
369 }
370 VbglGRFree(&pReqInfo1->header);
371 }
372 VbglGRFree(&pReqInfo2->header);
373 }
374
375 return rc;
376}
377
378
379/**
380 * Report the guest driver status to the host.
381 *
382 * @returns IPRT status code.
383 * @param fActive Flag whether the driver is now active or not.
384 */
385static int vbgdReportDriverStatus(bool fActive)
386{
387 /*
388 * Report guest status of the VBox driver to the host.
389 */
390 VMMDevReportGuestStatus *pReq2 = NULL;
391 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
392 Log(("vbgdReportDriverStatus: VbglGRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
393 if (RT_SUCCESS(rc))
394 {
395 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
396 pReq2->guestStatus.status = fActive ?
397 VBoxGuestFacilityStatus_Active
398 : VBoxGuestFacilityStatus_Inactive;
399 pReq2->guestStatus.flags = 0;
400 rc = VbglGRPerform(&pReq2->header);
401 Log(("vbgdReportDriverStatus: VbglGRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
402 fActive ? 1 : 0, rc));
403 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
404 rc = VINF_SUCCESS;
405 VbglGRFree(&pReq2->header);
406 }
407
408 return rc;
409}
410
411
412/** @name Memory Ballooning
413 * @{
414 */
415
416/**
417 * Inflate the balloon by one chunk represented by an R0 memory object.
418 *
419 * The caller owns the balloon mutex.
420 *
421 * @returns IPRT status code.
422 * @param pMemObj Pointer to the R0 memory object.
423 * @param pReq The pre-allocated request for performing the VMMDev call.
424 */
425static int vbgdBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
426{
427 uint32_t iPage;
428 int rc;
429
430 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
431 {
432 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
433 pReq->aPhysPage[iPage] = phys;
434 }
435
436 pReq->fInflate = true;
437 pReq->header.size = g_cbChangeMemBalloonReq;
438 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
439
440 rc = VbglGRPerform(&pReq->header);
441 if (RT_FAILURE(rc))
442 LogRel(("vbgdBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
443 return rc;
444}
445
446
447/**
448 * Deflate the balloon by one chunk - inform the host and free the memory object.
449 *
450 * The caller owns the balloon mutex.
451 *
452 * @returns IPRT status code.
453 * @param pMemObj Pointer to the R0 memory object.
454 * The memory object will be freed afterwards.
455 * @param pReq The pre-allocated request for performing the VMMDev call.
456 */
457static int vbgdBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
458{
459 uint32_t iPage;
460 int rc;
461
462 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
463 {
464 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
465 pReq->aPhysPage[iPage] = phys;
466 }
467
468 pReq->fInflate = false;
469 pReq->header.size = g_cbChangeMemBalloonReq;
470 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
471
472 rc = VbglGRPerform(&pReq->header);
473 if (RT_FAILURE(rc))
474 {
475 LogRel(("vbgdBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
476 return rc;
477 }
478
479 rc = RTR0MemObjFree(*pMemObj, true);
480 if (RT_FAILURE(rc))
481 {
482 LogRel(("vbgdBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
483 return rc;
484 }
485
486 *pMemObj = NIL_RTR0MEMOBJ;
487 return VINF_SUCCESS;
488}
489
490
491/**
492 * Inflate/deflate the memory balloon and notify the host.
493 *
494 * This is a worker used by vbgdIoCtl_CheckMemoryBalloon - it takes the mutex.
495 *
496 * @returns VBox status code.
497 * @param pDevExt The device extension.
499 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
500 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
501 * (VINF_SUCCESS if set).
502 */
503static int vbgdSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
504{
505 int rc = VINF_SUCCESS;
506
507 if (pDevExt->MemBalloon.fUseKernelAPI)
508 {
509 VMMDevChangeMemBalloon *pReq;
510 uint32_t i;
511
512 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
513 {
514 LogRel(("vbgdSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
515 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
516 return VERR_INVALID_PARAMETER;
517 }
518
519 if (cBalloonChunks == pDevExt->MemBalloon.cChunks)
520 return VINF_SUCCESS; /* nothing to do */
521
522 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
523 && !pDevExt->MemBalloon.paMemObj)
524 {
525 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
526 if (!pDevExt->MemBalloon.paMemObj)
527 {
528 LogRel(("vbgdSetBalloonSizeKernel: no memory for paMemObj!\n"));
529 return VERR_NO_MEMORY;
530 }
531 }
532
533 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
534 if (RT_FAILURE(rc))
535 return rc;
536
537 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
538 {
539 /* inflate */
540 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
541 {
542 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
543 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
544 if (RT_FAILURE(rc))
545 {
546 if (rc == VERR_NOT_SUPPORTED)
547 {
548 /* not supported -- fall back to the R3-allocated memory. */
549 rc = VINF_SUCCESS;
550 pDevExt->MemBalloon.fUseKernelAPI = false;
551 Assert(pDevExt->MemBalloon.cChunks == 0);
552 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
553 }
554 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
555 * cannot allocate more memory => don't try further, just stop here */
556 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
557 break;
558 }
559
560 rc = vbgdBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
561 if (RT_FAILURE(rc))
562 {
563 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
564 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
565 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
566 break;
567 }
568 pDevExt->MemBalloon.cChunks++;
569 }
570 }
571 else
572 {
573 /* deflate */
574 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
575 {
576 rc = vbgdBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
577 if (RT_FAILURE(rc))
578 {
579 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
580 break;
581 }
582 pDevExt->MemBalloon.cChunks--;
583 }
584 }
585
586 VbglGRFree(&pReq->header);
587 }
588
589 /*
590 * Set the handle-in-ring3 indicator. When set, ring-3 will have to perform
591 * the balloon changes via the other API.
592 */
593 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
594
595 return rc;
596}
597
598
599/**
600 * Inflate/deflate the balloon by one chunk.
601 *
602 * Worker for vbgdIoCtl_ChangeMemoryBalloon - it takes the mutex.
603 *
604 * @returns VBox status code.
605 * @param pDevExt The device extension.
606 * @param pSession The session.
607 * @param u64ChunkAddr The address of the chunk to add to / remove from the
608 * balloon.
609 * @param fInflate Inflate if true, deflate if false.
610 */
611static int vbgdSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint64_t u64ChunkAddr, bool fInflate)
612{
613 VMMDevChangeMemBalloon *pReq;
614 int rc = VINF_SUCCESS;
615 uint32_t i;
616 PRTR0MEMOBJ pMemObj = NULL;
617
618 if (fInflate)
619 {
620 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
621 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
622 {
623 LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
624 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
625 return VERR_INVALID_PARAMETER;
626 }
627
628 if (!pDevExt->MemBalloon.paMemObj)
629 {
630 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
631 if (!pDevExt->MemBalloon.paMemObj)
632 {
633 LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
634 return VERR_NO_MEMORY;
635 }
636 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
637 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
638 }
639 }
640 else
641 {
642 if (pDevExt->MemBalloon.cChunks == 0)
643 {
644 AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
645 return VERR_INVALID_PARAMETER;
646 }
647 }
648
649 /*
650 * Enumerate all memory objects and check if the object is already registered.
651 */
652 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
653 {
654 if ( fInflate
655 && !pMemObj
656 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
657 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
658 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
659 {
660 if (fInflate)
661 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
662 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
663 break;
664 }
665 }
666 if (!pMemObj)
667 {
668 if (fInflate)
669 {
670 /* no free object pointer found -- should not happen */
671 return VERR_NO_MEMORY;
672 }
673
674 /* cannot free this memory as it wasn't provided before */
675 return VERR_NOT_FOUND;
676 }
677
678 /*
679 * Try to inflate / deflate the balloon as requested.
680 */
681 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
682 if (RT_FAILURE(rc))
683 return rc;
684
685 if (fInflate)
686 {
687 rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
688 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
689 if (RT_SUCCESS(rc))
690 {
691 rc = vbgdBalloonInflate(pMemObj, pReq);
692 if (RT_SUCCESS(rc))
693 pDevExt->MemBalloon.cChunks++;
694 else
695 {
696 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
697 RTR0MemObjFree(*pMemObj, true);
698 *pMemObj = NIL_RTR0MEMOBJ;
699 }
700 }
701 }
702 else
703 {
704 rc = vbgdBalloonDeflate(pMemObj, pReq);
705 if (RT_SUCCESS(rc))
706 pDevExt->MemBalloon.cChunks--;
707 else
708 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
709 }
710
711 VbglGRFree(&pReq->header);
712 return rc;
713}
714
715
716/**
717 * Clean up the memory balloon of a session.
718 *
719 * Will request the balloon mutex, so it must be valid and the caller must not
720 * own it already.
721 *
722 * @param pDevExt The device extension.
723 * @param pSession The session. Can be NULL at unload.
724 */
725static void vbgdCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
726{
727 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
728 if ( pDevExt->MemBalloon.pOwner == pSession
729 || pSession == NULL /*unload*/)
730 {
731 if (pDevExt->MemBalloon.paMemObj)
732 {
733 VMMDevChangeMemBalloon *pReq;
734 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
735 if (RT_SUCCESS(rc))
736 {
737 uint32_t i;
738 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
739 {
740 rc = vbgdBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
741 if (RT_FAILURE(rc))
742 {
743 LogRel(("vbgdCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
744 rc, pDevExt->MemBalloon.cChunks));
745 break;
746 }
747 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
748 pDevExt->MemBalloon.cChunks--;
749 }
750 VbglGRFree(&pReq->header);
751 }
752 else
753 LogRel(("vbgdCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
754 rc, pDevExt->MemBalloon.cChunks));
755 RTMemFree(pDevExt->MemBalloon.paMemObj);
756 pDevExt->MemBalloon.paMemObj = NULL;
757 }
758
759 pDevExt->MemBalloon.pOwner = NULL;
760 }
761 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
762}
763
764/** @} */
765
766
767
768/** @name Heartbeat
769 * @{
770 */
771
772/**
773 * Sends heartbeat to host.
774 *
775 * @returns VBox status code.
776 */
777static int vbgdHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
778{
779 int rc;
780 if (pDevExt->pReqGuestHeartbeat)
781 {
782 rc = VbglGRPerform(pDevExt->pReqGuestHeartbeat);
783 Log(("vbgdHeartbeatSend: VbglGRPerform vbgdHeartbeatSend completed with rc=%Rrc\n", rc));
784 }
785 else
786 rc = VERR_INVALID_STATE;
787 return rc;
788}
789
790
791/**
792 * Callback for heartbeat timer.
793 */
794static DECLCALLBACK(void) vbgdHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
795{
796 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
797 int rc;
798 AssertReturnVoid(pDevExt);
799
800 rc = vbgdHeartbeatSend(pDevExt);
801 if (RT_FAILURE(rc))
802 Log(("HB Timer: vbgdHeartbeatSend failed: rc=%Rrc\n", rc));
803
804 NOREF(hTimer); NOREF(iTick);
805}
806
807
808/**
809 * Configure the host to check the guest's heartbeat
810 * and get the heartbeat interval from the host.
811 *
812 * @returns VBox status code.
813 * @param pDevExt The device extension.
814 * @param fEnabled Set true to enable guest heartbeat checks on host.
815 */
816static int vbgdHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
817{
818 VMMDevReqHeartbeat *pReq;
819 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
820 Log(("vbgdHeartbeatHostConfigure: VbglGRAlloc vbgdHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
821 if (RT_SUCCESS(rc))
822 {
823 pReq->fEnabled = fEnabled;
824 pReq->cNsInterval = 0;
825 rc = VbglGRPerform(&pReq->header);
826 Log(("vbgdHeartbeatHostConfigure: VbglGRPerform vbgdHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
827 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
828 VbglGRFree(&pReq->header);
829 }
830 return rc;
831}
832
833
834/**
835 * Initializes the heartbeat timer.
836 *
837 * This feature may be disabled by the host.
838 *
839 * @returns VBox status (ignored).
840 * @param pDevExt The device extension.
841 */
842static int vbgdHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
843{
844 /*
845 * Make sure that heartbeat checking is disabled.
846 */
847 int rc = vbgdHeartbeatHostConfigure(pDevExt, false);
848 if (RT_SUCCESS(rc))
849 {
850 rc = vbgdHeartbeatHostConfigure(pDevExt, true);
851 if (RT_SUCCESS(rc))
852 {
853 /*
854 * Preallocate the request to use it from the timer callback because:
855 * 1) on Windows VbglGRAlloc must be called at IRQL <= APC_LEVEL
856 * and the timer callback runs at DISPATCH_LEVEL;
857 * 2) avoid repeated allocations.
858 */
859 rc = VbglGRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
860 if (RT_SUCCESS(rc))
861 {
862 LogRel(("VbgdCommonInitDevExt: Setting up heartbeat to trigger every %RU64 sec\n",
863 pDevExt->cNsHeartbeatInterval / RT_NS_1SEC));
864 rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
865 (PFNRTTIMER)vbgdHeartbeatTimerHandler, pDevExt);
866 if (RT_SUCCESS(rc))
867 {
868 rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
869 if (RT_SUCCESS(rc))
870 return VINF_SUCCESS;
871
872 LogRel(("VbgdCommonInitDevExt: Heartbeat timer failed to start, rc=%Rrc\n", rc));
873 }
874 else
875 LogRel(("VbgdCommonInitDevExt: Failed to create heartbeat timer: %Rrc\n", rc));
876
877 VbglGRFree(pDevExt->pReqGuestHeartbeat);
878 pDevExt->pReqGuestHeartbeat = NULL;
879 }
880 else
881 LogRel(("VbgdCommonInitDevExt: VbglGRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));
882
883 LogRel(("VbgdCommonInitDevExt: Failed to set up the timer, guest heartbeat is disabled\n"));
884 vbgdHeartbeatHostConfigure(pDevExt, false);
885 }
886 else
887 LogRel(("VbgdCommonInitDevExt: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
888 }
889 return rc;
890}
891
892/** @} */
893
894
895/**
896 * Helper to reinit the VMMDev communication after hibernation.
897 *
898 * @returns VBox status code.
899 * @param pDevExt The device extension.
900 * @param enmOSType The OS type.
901 *
902 * @todo Call this on all platforms, not just Windows.
903 */
904int VbgdCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
905{
906 int rc = vbgdReportGuestInfo(enmOSType);
907 if (RT_SUCCESS(rc))
908 {
909 rc = vbgdReportDriverStatus(true /* Driver is active */);
910 if (RT_FAILURE(rc))
911 Log(("VbgdCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
912 }
913 else
914 Log(("VbgdCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
915 LogFlow(("VbgdCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
916 return rc;
917}
918
919
920/**
921 * Initializes the VBoxGuest device extension when the
922 * device driver is loaded.
923 *
924 * The native code locates the VMMDev on the PCI bus and retrieves
925 * the MMIO and I/O port ranges; this function will take care of
926 * mapping the MMIO memory (if present). Upon successful return
927 * the native code should set up the interrupt handler.
928 *
929 * @returns VBox status code.
930 *
931 * @param pDevExt The device extension. Allocated by the native code.
932 * @param IOPortBase The base of the I/O port range.
933 * @param pvMMIOBase The base of the MMIO memory mapping.
934 * This is optional, pass NULL if not present.
935 * @param cbMMIO The size of the MMIO memory mapping.
936 * This is optional, pass 0 if not present.
937 * @param enmOSType The guest OS type to report to the VMMDev.
938 * @param fFixedEvents Events that will be enabled upon init and no client
939 * will ever be allowed to mask.
940 */
941int VbgdCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
942 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
943{
944 int rc, rc2;
945
946#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
947 /*
948 * Create the release log.
949 */
950 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
951 PRTLOGGER pRelLogger;
952 rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
953 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
954 if (RT_SUCCESS(rc))
955 RTLogRelSetDefaultInstance(pRelLogger);
956 /** @todo Add native hook for getting logger config parameters and setting
957 * them. On Linux we should use the module parameter stuff... */
958#endif
959
960 /*
961 * Adjust fFixedEvents.
962 */
963#ifdef VBOX_WITH_HGCM
964 fFixedEvents |= VMMDEV_EVENT_HGCM;
965#endif
966
967 /*
968 * Initialize the data.
969 */
970 pDevExt->IOPortBase = IOPortBase;
971 pDevExt->pVMMDevMemory = NULL;
972 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
973 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
974 pDevExt->pIrqAckEvents = NULL;
975 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
976 RTListInit(&pDevExt->WaitList);
977#ifdef VBOX_WITH_HGCM
978 RTListInit(&pDevExt->HGCMWaitList);
979#endif
980#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
981 RTListInit(&pDevExt->WakeUpList);
982#endif
983 RTListInit(&pDevExt->WokenUpList);
984 RTListInit(&pDevExt->FreeList);
985 RTListInit(&pDevExt->SessionList);
986 pDevExt->cSessions = 0;
987 pDevExt->fLoggingEnabled = false;
988 pDevExt->f32PendingEvents = 0;
989 pDevExt->u32MousePosChangedSeq = 0;
990 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
991 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
992 pDevExt->MemBalloon.cChunks = 0;
993 pDevExt->MemBalloon.cMaxChunks = 0;
994 pDevExt->MemBalloon.fUseKernelAPI = true;
995 pDevExt->MemBalloon.paMemObj = NULL;
996 pDevExt->MemBalloon.pOwner = NULL;
997 pDevExt->MouseNotifyCallback.pfnNotify = NULL;
998 pDevExt->MouseNotifyCallback.pvUser = NULL;
999 pDevExt->pReqGuestHeartbeat = NULL;
1000
1001 pDevExt->fFixedEvents = fFixedEvents;
1002 vbgdBitUsageTrackerClear(&pDevExt->EventFilterTracker);
1003 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
1004
1005 vbgdBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
1006 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
1007
1008 pDevExt->fAcquireModeGuestCaps = 0;
1009 pDevExt->fSetModeGuestCaps = 0;
1010 pDevExt->fAcquiredGuestCaps = 0;
1011 vbgdBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
1012 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
1013
1014 /*
1015 * If there is an MMIO region validate the version and size.
1016 */
1017 if (pvMMIOBase)
1018 {
1019 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1020 Assert(cbMMIO);
1021 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1022 && pVMMDev->u32Size >= 32
1023 && pVMMDev->u32Size <= cbMMIO)
1024 {
1025 pDevExt->pVMMDevMemory = pVMMDev;
1026 Log(("VbgdCommonInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1027 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1028 }
1029 else /* try to live without it. */
1030 LogRel(("VbgdCommonInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1031 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1032 }
1033
1034 /*
1035 * Create the wait and session spinlocks as well as the ballooning mutex.
1036 */
1037 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1038 if (RT_SUCCESS(rc))
1039 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1040 if (RT_FAILURE(rc))
1041 {
1042 LogRel(("VbgdCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1043 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
1044 RTSpinlockDestroy(pDevExt->EventSpinlock);
1045 return rc;
1046 }
1047
1048 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1049 if (RT_FAILURE(rc))
1050 {
1051 LogRel(("VbgdCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1052 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1053 RTSpinlockDestroy(pDevExt->EventSpinlock);
1054 return rc;
1055 }
1056
1057 /*
1058 * Initialize the guest library and report the guest info back to VMMDev,
1059 * set the interrupt control filter mask, and fixate the guest mappings
1060 * made by the VMM.
1061 */
1062 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
1063 if (RT_SUCCESS(rc))
1064 {
1065 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1066 if (RT_SUCCESS(rc))
1067 {
1068 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
1069 Assert(pDevExt->PhysIrqAckEvents != 0);
1070
1071 rc = vbgdReportGuestInfo(enmOSType);
1072 if (RT_SUCCESS(rc))
1073 {
1074 /*
1075 * Set the fixed event filter and make sure the host doesn't have any lingering
1076 * guest capabilities or mouse status bits set.
1077 */
1078 rc = vbgdResetEventFilterOnHost(pDevExt, pDevExt->fFixedEvents);
1079 if (RT_SUCCESS(rc))
1080 {
1081 rc = vbgdResetCapabilitiesOnHost(pDevExt);
1082 if (RT_SUCCESS(rc))
1083 {
1084 rc = vbgdResetMouseStatusOnHost(pDevExt);
1085 if (RT_SUCCESS(rc))
1086 {
1087 /*
1088 * Initialize stuff which may fail without requiring the driver init to fail.
1089 */
1090 vbgdInitFixateGuestMappings(pDevExt);
1091 vbgdHeartbeatInit(pDevExt);
1092
1093 /*
1094 * Done!
1095 */
1096 rc = vbgdReportDriverStatus(true /* Driver is active */);
1097 if (RT_FAILURE(rc))
1098 LogRel(("VbgdCommonInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1099
1100 LogFlowFunc(("VbgdCommonInitDevExt: returns success\n"));
1101 return VINF_SUCCESS;
1102 }
1103 LogRel(("VbgdCommonInitDevExt: failed to clear mouse status: rc=%Rrc\n", rc));
1104 }
1105 else
1106 LogRel(("VbgdCommonInitDevExt: failed to clear guest capabilities: rc=%Rrc\n", rc));
1107 }
1108 else
1109 LogRel(("VbgdCommonInitDevExt: failed to set fixed event filter: rc=%Rrc\n", rc));
1110 }
1111 else
1112 LogRel(("VbgdCommonInitDevExt: VBoxReportGuestInfo failed: rc=%Rrc\n", rc));
1113 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1114 }
1115 else
1116 LogRel(("VbgdCommonInitDevExt: VBoxGRAlloc failed: rc=%Rrc\n", rc));
1117
1118 VbglTerminate();
1119 }
1120 else
1121 LogRel(("VbgdCommonInitDevExt: VbglInit failed: rc=%Rrc\n", rc));
1122
1123 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1124 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1125 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1126
1127#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1128 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1129 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1130#endif
1131 return rc; /* (failed) */
1132}
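/* Illustrative call sequence for the native glue code described in the doc comment of
 * VbgdCommonInitDevExt above (a sketch only; the variable names, the nativeInstallIrqHandler
 * helper and the OS type value are assumptions and not part of this file):
 *
 *     // After locating the VMMDev PCI device and mapping its resources:
 *     rc = VbgdCommonInitDevExt(&g_DevExt, (uint16_t)uIoPortBase, pvMmioMapping, cbMmio,
 *                               VBOXOSTYPE_Linux26, 0);    // 0 = no extra fixed events
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = nativeInstallIrqHandler(&g_DevExt);         // hook the ISR only after init succeeds
 *         if (RT_FAILURE(rc))
 *             VbgdCommonDeleteDevExt(&g_DevExt);           // roll back if the ISR cannot be hooked
 *     }
 */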
1133
1134
1135/**
1136 * Deletes all the items in a wait chain.
1137 * @param pList The head of the chain.
1138 */
1139static void vbgdDeleteWaitList(PRTLISTNODE pList)
1140{
1141 while (!RTListIsEmpty(pList))
1142 {
1143 int rc2;
1144 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1145 RTListNodeRemove(&pWait->ListNode);
1146
1147 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1148 pWait->Event = NIL_RTSEMEVENTMULTI;
1149 pWait->pSession = NULL;
1150 RTMemFree(pWait);
1151 }
1152}
1153
1154
1155/**
1156 * Destroys the VBoxGuest device extension.
1157 *
1158 * The native code should call this before the driver is unloaded,
1159 * but don't call this on shutdown.
1160 *
1161 * @param pDevExt The device extension.
1162 */
1163void VbgdCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1164{
1165 int rc2;
1166 Log(("VbgdCommonDeleteDevExt:\n"));
1167 Log(("VBoxGuest: The additions driver is terminating.\n"));
1168
1169 /*
1170 * Stop and destroy HB timer and
1171 * disable host heartbeat checking.
1172 */
1173 if (pDevExt->pHeartbeatTimer)
1174 {
1175 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1176 vbgdHeartbeatHostConfigure(pDevExt, false);
1177 }
1178
1179 VbglGRFree(pDevExt->pReqGuestHeartbeat);
1180 pDevExt->pReqGuestHeartbeat = NULL;
1181
1182 /*
1183 * Clean up the bits that involve the host first.
1184 */
1185 vbgdTermUnfixGuestMappings(pDevExt);
1186 if (!RTListIsEmpty(&pDevExt->SessionList))
1187 {
1188 LogRelFunc(("session list not empty!\n"));
1189 RTListInit(&pDevExt->SessionList);
1190 }
1191 /* Update the host flags (mouse status etc) not to reflect this session. */
1192 pDevExt->fFixedEvents = 0;
1193 vbgdResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1194 vbgdResetCapabilitiesOnHost(pDevExt);
1195 vbgdResetMouseStatusOnHost(pDevExt);
1196
1197 vbgdCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1198
1199 /*
1200 * Cleanup all the other resources.
1201 */
1202 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1203 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1204 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1205
1206 vbgdDeleteWaitList(&pDevExt->WaitList);
1207#ifdef VBOX_WITH_HGCM
1208 vbgdDeleteWaitList(&pDevExt->HGCMWaitList);
1209#endif
1210#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1211 vbgdDeleteWaitList(&pDevExt->WakeUpList);
1212#endif
1213 vbgdDeleteWaitList(&pDevExt->WokenUpList);
1214 vbgdDeleteWaitList(&pDevExt->FreeList);
1215
1216 VbglTerminate();
1217
1218 pDevExt->pVMMDevMemory = NULL;
1219
1220 pDevExt->IOPortBase = 0;
1221 pDevExt->pIrqAckEvents = NULL;
1222
1223#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1224 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1225 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1226#endif
1227
1228}
1229
1230
1231/**
1232 * Creates a VBoxGuest user session.
1233 *
1234 * The native code calls this when a ring-3 client opens the device.
1235 * Use VbgdCommonCreateKernelSession when a ring-0 client connects.
1236 *
1237 * @returns VBox status code.
1238 * @param pDevExt The device extension.
1239 * @param ppSession Where to store the session on success.
1240 */
1241int VbgdCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1242{
1243 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1244 if (RT_UNLIKELY(!pSession))
1245 {
1246 LogRel(("VbgdCommonCreateUserSession: no memory!\n"));
1247 return VERR_NO_MEMORY;
1248 }
1249
1250 pSession->Process = RTProcSelf();
1251 pSession->R0Process = RTR0ProcHandleSelf();
1252 pSession->pDevExt = pDevExt;
1253 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1254 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1255 pDevExt->cSessions++;
1256 RTSpinlockRelease(pDevExt->SessionSpinlock);
1257
1258 *ppSession = pSession;
1259 LogFlow(("VbgdCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1260 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1261 return VINF_SUCCESS;
1262}
1263
1264
1265/**
1266 * Creates a VBoxGuest kernel session.
1267 *
1268 * The native code calls this when a ring-0 client connects to the device.
1269 * Use VbgdCommonCreateUserSession when a ring-3 client opens the device.
1270 *
1271 * @returns VBox status code.
1272 * @param pDevExt The device extension.
1273 * @param ppSession Where to store the session on success.
1274 */
1275int VbgdCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1276{
1277 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1278 if (RT_UNLIKELY(!pSession))
1279 {
1280 LogRel(("VbgdCommonCreateKernelSession: no memory!\n"));
1281 return VERR_NO_MEMORY;
1282 }
1283
1284 pSession->Process = NIL_RTPROCESS;
1285 pSession->R0Process = NIL_RTR0PROCESS;
1286 pSession->pDevExt = pDevExt;
1287 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1288 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1289 pDevExt->cSessions++;
1290 RTSpinlockRelease(pDevExt->SessionSpinlock);
1291
1292 *ppSession = pSession;
1293 LogFlow(("VbgdCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1294 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1295 return VINF_SUCCESS;
1296}
1297
1298
1299/**
1300 * Closes a VBoxGuest session.
1301 *
1302 * @param pDevExt The device extension.
1303 * @param pSession The session to close (and free).
1304 */
1305void VbgdCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1306{
1307#ifdef VBOX_WITH_HGCM
1308 unsigned i;
1309#endif
1310 LogFlow(("VbgdCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1311 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1312
1313 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1314 RTListNodeRemove(&pSession->ListNode);
1315 pDevExt->cSessions--;
1316 RTSpinlockRelease(pDevExt->SessionSpinlock);
1317 vbgdAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE,
1318 true /*fSessionTermination*/);
1319 vbgdSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1320 vbgdSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1321 vbgdSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1322
1323 vbgdIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1324
1325#ifdef VBOX_WITH_HGCM
1326 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1327 if (pSession->aHGCMClientIds[i])
1328 {
1329 VBoxGuestHGCMDisconnectInfo Info;
1330 Info.result = 0;
1331 Info.u32ClientID = pSession->aHGCMClientIds[i];
1332 pSession->aHGCMClientIds[i] = 0;
1333 Log(("VbgdCommonCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1334 VbglR0HGCMInternalDisconnect(&Info, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1335 }
1336#endif
1337
1338 pSession->pDevExt = NULL;
1339 pSession->Process = NIL_RTPROCESS;
1340 pSession->R0Process = NIL_RTR0PROCESS;
1341 vbgdCloseMemBalloon(pDevExt, pSession);
1342 RTMemFree(pSession);
1343}
1344
1345
1346/**
1347 * Allocates a wait-for-event entry.
1348 *
1349 * @returns The wait-for-event entry.
1350 * @param pDevExt The device extension.
1351 * @param pSession The session that's allocating this. Can be NULL.
1352 */
1353static PVBOXGUESTWAIT vbgdWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1354{
1355 /*
1356 * Allocate it one way or the other.
1357 */
1358 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1359 if (pWait)
1360 {
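/* Re-check under the event spinlock: the unlocked peek above is only an optimization
   and the free list may have changed in the meantime. */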
1361 RTSpinlockAcquire(pDevExt->EventSpinlock);
1362
1363 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1364 if (pWait)
1365 RTListNodeRemove(&pWait->ListNode);
1366
1367 RTSpinlockRelease(pDevExt->EventSpinlock);
1368 }
1369 if (!pWait)
1370 {
1371 int rc;
1372
1373 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1374 if (!pWait)
1375 {
1376 LogRelMax(32, ("vbgdWaitAlloc: out-of-memory!\n"));
1377 return NULL;
1378 }
1379
1380 rc = RTSemEventMultiCreate(&pWait->Event);
1381 if (RT_FAILURE(rc))
1382 {
1383 LogRelMax(32, ("VbgdCommonIoCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1384 RTMemFree(pWait);
1385 return NULL;
1386 }
1387
1388 pWait->ListNode.pNext = NULL;
1389 pWait->ListNode.pPrev = NULL;
1390 }
1391
1392 /*
1393 * Zero members just as a precaution.
1394 */
1395 pWait->fReqEvents = 0;
1396 pWait->fResEvents = 0;
1397#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1398 pWait->fPendingWakeUp = false;
1399 pWait->fFreeMe = false;
1400#endif
1401 pWait->pSession = pSession;
1402#ifdef VBOX_WITH_HGCM
1403 pWait->pHGCMReq = NULL;
1404#endif
1405 RTSemEventMultiReset(pWait->Event);
1406 return pWait;
1407}
1408
1409
1410/**
1411 * Frees the wait-for-event entry.
1412 *
1413 * The caller must own the wait spinlock!
1414 * The entry must be in a list!
1415 *
1416 * @param pDevExt The device extension.
1417 * @param pWait The wait-for-event entry to free.
1418 */
1419static void vbgdWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1420{
1421 pWait->fReqEvents = 0;
1422 pWait->fResEvents = 0;
1423#ifdef VBOX_WITH_HGCM
1424 pWait->pHGCMReq = NULL;
1425#endif
1426#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1427 Assert(!pWait->fFreeMe);
1428 if (pWait->fPendingWakeUp)
1429 pWait->fFreeMe = true;
1430 else
1431#endif
1432 {
1433 RTListNodeRemove(&pWait->ListNode);
1434 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1435 }
1436}
1437
1438
1439/**
1440 * Frees the wait-for-event entry.
1441 *
1442 * @param pDevExt The device extension.
1443 * @param pWait The wait-for-event entry to free.
1444 */
1445static void vbgdWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1446{
1447 RTSpinlockAcquire(pDevExt->EventSpinlock);
1448 vbgdWaitFreeLocked(pDevExt, pWait);
1449 RTSpinlockRelease(pDevExt->EventSpinlock);
1450}
1451
1452
1453#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1454/**
1455 * Processes the wake-up list.
1456 *
1457 * All entries in the wake-up list get signalled and moved to the woken-up
1458 * list.
1459 *
1460 * @param pDevExt The device extension.
1461 */
1462void VbgdCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1463{
1464 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1465 {
1466 RTSpinlockAcquire(pDevExt->EventSpinlock);
1467 for (;;)
1468 {
1469 int rc;
1470 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1471 if (!pWait)
1472 break;
1473 pWait->fPendingWakeUp = true;
1474 RTSpinlockRelease(pDevExt->EventSpinlock);
1475
1476 rc = RTSemEventMultiSignal(pWait->Event);
1477 AssertRC(rc);
1478
1479 RTSpinlockAcquire(pDevExt->EventSpinlock);
1480 pWait->fPendingWakeUp = false;
1481 if (!pWait->fFreeMe)
1482 {
1483 RTListNodeRemove(&pWait->ListNode);
1484 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1485 }
1486 else
1487 {
1488 pWait->fFreeMe = false;
1489 vbgdWaitFreeLocked(pDevExt, pWait);
1490 }
1491 }
1492 RTSpinlockRelease(pDevExt->EventSpinlock);
1493 }
1494}
1495#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1496
1497
1498/**
1499 * Implements the fast (no input or output) type of IOCtls.
1500 *
1501 * This is currently just a placeholder stub inherited from the support driver code.
1502 *
1503 * @returns VBox status code.
1504 * @param iFunction The IOCtl function number.
1505 * @param pDevExt The device extension.
1506 * @param pSession The session.
1507 */
1508int VbgdCommonIoCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1509{
1510 LogFlow(("VbgdCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1511
1512 NOREF(iFunction);
1513 NOREF(pDevExt);
1514 NOREF(pSession);
1515 return VERR_NOT_SUPPORTED;
1516}
1517
1518
1519/**
1520 * Return the VMM device port.
1521 *
1522 * @returns IPRT status code.
1523 * @param pDevExt The device extension.
1524 * @param pInfo The request info.
1525 * @param pcbDataReturned (out) contains the number of bytes to return.
1526 */
1527static int vbgdIoCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1528{
1529 LogFlow(("VbgdCommonIoCtl: GETVMMDEVPORT\n"));
1530
1531 pInfo->portAddress = pDevExt->IOPortBase;
1532 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1533 if (pcbDataReturned)
1534 *pcbDataReturned = sizeof(*pInfo);
1535 return VINF_SUCCESS;
1536}
1537
1538
1539#ifndef RT_OS_WINDOWS
1540/**
1541 * Set the callback for the kernel mouse handler.
1542 *
1543 * @returns IPRT status code.
1544 * @param pDevExt The device extension.
1545 * @param pNotify The new callback information.
1546 */
1547int vbgdIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1548{
1549 LogFlow(("VbgdCommonIoCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1550
1551 RTSpinlockAcquire(pDevExt->EventSpinlock);
1552 pDevExt->MouseNotifyCallback = *pNotify;
1553 RTSpinlockRelease(pDevExt->EventSpinlock);
1554 return VINF_SUCCESS;
1555}
1556#endif
1557
1558
1559/**
1560 * Worker for vbgdIoCtl_WaitEvent.
1561 *
1562 * The caller enters the spinlock; we leave it.
1563 *
1564 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1565 */
1566DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1567 VBoxGuestWaitEventInfo *pInfo, int iEvent, const uint32_t fReqEvents)
1568{
1569 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1570 if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
1571 fMatches &= vbgdGetAllowedEventMaskForSession(pDevExt, pSession);
1572 if (fMatches || pSession->fPendingCancelWaitEvents)
1573 {
1574 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1575 RTSpinlockRelease(pDevExt->EventSpinlock);
1576
1577 pInfo->u32EventFlagsOut = fMatches;
1578 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1579 if (fReqEvents & ~((uint32_t)1 << iEvent))
1580 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1581 else
1582 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1583 pSession->fPendingCancelWaitEvents = false;
1584 return VINF_SUCCESS;
1585 }
1586
1587 RTSpinlockRelease(pDevExt->EventSpinlock);
1588 return VERR_TIMEOUT;
1589}
1590
1591
1592static int vbgdIoCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1593 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
1594{
1595 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
1596 uint32_t fResEvents;
1597 int iEvent;
1598 PVBOXGUESTWAIT pWait;
1599 int rc;
1600
1601 pInfo->u32EventFlagsOut = 0;
1602 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1603 if (pcbDataReturned)
1604 *pcbDataReturned = sizeof(*pInfo);
1605
1606 /*
1607 * Copy and verify the input mask.
1608 */
1609 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
1610 if (RT_UNLIKELY(iEvent < 0))
1611 {
1612 LogRel(("VbgdCommonIoCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
1613 return VERR_INVALID_PARAMETER;
1614 }
1615
1616 /*
1617 * Check the condition up front, before doing the wait-for-event allocations.
1618 */
1619 RTSpinlockAcquire(pDevExt->EventSpinlock);
1620 rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
1621 if (rc == VINF_SUCCESS)
1622 return rc;
1623
1624 if (!pInfo->u32TimeoutIn)
1625 {
1626 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1627 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
1628 return VERR_TIMEOUT;
1629 }
1630
1631 pWait = vbgdWaitAlloc(pDevExt, pSession);
1632 if (!pWait)
1633 return VERR_NO_MEMORY;
1634 pWait->fReqEvents = fReqEvents;
1635
1636 /*
1637 * We've got the wait entry now, re-enter the spinlock and check for the condition.
1638 * If the wait condition is met, return.
1639 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
1640 */
1641 RTSpinlockAcquire(pDevExt->EventSpinlock);
1642 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
1643 rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
1644 if (rc == VINF_SUCCESS)
1645 {
1646 vbgdWaitFreeUnlocked(pDevExt, pWait);
1647 return rc;
1648 }
1649
1650 if (fInterruptible)
1651 rc = RTSemEventMultiWaitNoResume(pWait->Event,
1652 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1653 else
1654 rc = RTSemEventMultiWait(pWait->Event,
1655 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1656
1657 /*
1658 * There is one special case here and that's when the semaphore is
1659 * destroyed upon device driver unload. This shouldn't happen of course,
1660 * but in case it does, just get out of here ASAP.
1661 */
1662 if (rc == VERR_SEM_DESTROYED)
1663 return rc;
1664
1665 /*
1666 * Unlink the wait item and dispose of it.
1667 */
1668 RTSpinlockAcquire(pDevExt->EventSpinlock);
1669 fResEvents = pWait->fResEvents;
1670 vbgdWaitFreeLocked(pDevExt, pWait);
1671 RTSpinlockRelease(pDevExt->EventSpinlock);
1672
1673 /*
1674 * Now deal with the return code.
1675 */
1676 if ( fResEvents
1677 && fResEvents != UINT32_MAX)
1678 {
1679 pInfo->u32EventFlagsOut = fResEvents;
1680 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1681 if (fReqEvents & ~((uint32_t)1 << iEvent))
1682 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1683 else
1684 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1685 rc = VINF_SUCCESS;
1686 }
1687 else if ( fResEvents == UINT32_MAX
1688 || rc == VERR_INTERRUPTED)
1689 {
1690 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
1691 rc = VERR_INTERRUPTED;
1692 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
1693 }
1694 else if (rc == VERR_TIMEOUT)
1695 {
1696 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1697 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
1698 }
1699 else
1700 {
1701 if (RT_SUCCESS(rc))
1702 {
1703 LogRelMax(32, ("VbgdCommonIoCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
1704 rc = VERR_INTERNAL_ERROR;
1705 }
1706 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1707 LogFlow(("VbgdCommonIoCtl: WAITEVENT: returns %Rrc\n", rc));
1708 }
1709
1710 return rc;
1711}
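
/*
 * A minimal ring-3 sketch of driving the WAITEVENT path above. The
 * VBOXGUEST_IOCTL_WAITEVENT request code is assumed to come from VBoxGuest.h,
 * and fdVBoxGuest / handleEvents are hypothetical stand-ins for the per-OS
 * device handle and the caller's own event handler.
 *
 * @code
 *    VBoxGuestWaitEventInfo Info;
 *    RT_ZERO(Info);
 *    Info.u32TimeoutIn   = 5000;                                // Milliseconds; UINT32_MAX waits indefinitely.
 *    Info.u32EventMaskIn = VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST; // One or more VMMDEV_EVENT_XXX bits.
 *    if (ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_WAITEVENT, &Info) == 0)
 *    {
 *        if (Info.u32Result == VBOXGUEST_WAITEVENT_OK)
 *            handleEvents(Info.u32EventFlagsOut);               // The subset of requested events that fired.
 *        else if (Info.u32Result == VBOXGUEST_WAITEVENT_TIMEOUT)
 *            ; // Nothing became pending within the timeout.
 *    }
 * @endcode
 */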
1712
1713
1714static int vbgdIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1715{
1716 PVBOXGUESTWAIT pWait;
1717 PVBOXGUESTWAIT pSafe;
1718 int rc = 0;
1719 /* Was at least one WAITEVENT in progress for this session? If not we
1720 * set a flag that the next call should be interrupted immediately. This
1721 * is needed so that a user thread can reliably interrupt another one in a
1722 * WAITEVENT loop. */
1723 bool fCancelledOne = false;
1724
1725 LogFlow(("VbgdCommonIoCtl: CANCEL_ALL_WAITEVENTS\n"));
1726
1727 /*
1728 * Walk the event list and wake up anyone with a matching session.
1729 */
1730 RTSpinlockAcquire(pDevExt->EventSpinlock);
1731 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
1732 {
1733 if (pWait->pSession == pSession)
1734 {
1735 fCancelledOne = true;
1736 pWait->fResEvents = UINT32_MAX;
1737 RTListNodeRemove(&pWait->ListNode);
1738#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1739 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
1740#else
1741 rc |= RTSemEventMultiSignal(pWait->Event);
1742 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1743#endif
1744 }
1745 }
1746 if (!fCancelledOne)
1747 pSession->fPendingCancelWaitEvents = true;
1748 RTSpinlockRelease(pDevExt->EventSpinlock);
1749 Assert(rc == 0);
1750 NOREF(rc);
1751
1752#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1753 VbgdCommonWaitDoWakeUps(pDevExt);
1754#endif
1755
1756 return VINF_SUCCESS;
1757}
1758
1759/**
1760 * Checks if the VMM request is allowed in the context of the given session.
1761 *
1762 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1763 * @param pSession The calling session.
1764 * @param enmType The request type.
1765 * @param pReqHdr The request.
1766 */
1767static int vbgdCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1768 VMMDevRequestHeader const *pReqHdr)
1769{
1770 /*
1771 * Categorize the request being made.
1772 */
1773 /** @todo This needs quite some more work! */
1774 enum
1775 {
1776 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1777 } enmRequired;
1778 switch (enmType)
1779 {
1780 /*
1781 * Deny access to anything we don't know or provide specialized I/O controls for.
1782 */
1783#ifdef VBOX_WITH_HGCM
1784 case VMMDevReq_HGCMConnect:
1785 case VMMDevReq_HGCMDisconnect:
1786# ifdef VBOX_WITH_64_BITS_GUESTS
1787 case VMMDevReq_HGCMCall32:
1788 case VMMDevReq_HGCMCall64:
1789# else
1790 case VMMDevReq_HGCMCall:
1791# endif /* VBOX_WITH_64_BITS_GUESTS */
1792 case VMMDevReq_HGCMCancel:
1793 case VMMDevReq_HGCMCancel2:
1794#endif /* VBOX_WITH_HGCM */
1795 case VMMDevReq_SetGuestCapabilities:
1796 default:
1797 enmRequired = kLevel_NoOne;
1798 break;
1799
1800 /*
1801 * There are a few things only this driver can do (and it doesn't use
1802 * the VMMRequest I/O control route anyway, but whatever).
1803 */
1804 case VMMDevReq_ReportGuestInfo:
1805 case VMMDevReq_ReportGuestInfo2:
1806 case VMMDevReq_GetHypervisorInfo:
1807 case VMMDevReq_SetHypervisorInfo:
1808 case VMMDevReq_RegisterPatchMemory:
1809 case VMMDevReq_DeregisterPatchMemory:
1810 case VMMDevReq_GetMemBalloonChangeRequest:
1811 enmRequired = kLevel_OnlyVBoxGuest;
1812 break;
1813
1814 /*
1815 * Trusted users apps only.
1816 */
1817 case VMMDevReq_QueryCredentials:
1818 case VMMDevReq_ReportCredentialsJudgement:
1819 case VMMDevReq_RegisterSharedModule:
1820 case VMMDevReq_UnregisterSharedModule:
1821 case VMMDevReq_WriteCoreDump:
1822 case VMMDevReq_GetCpuHotPlugRequest:
1823 case VMMDevReq_SetCpuHotPlugStatus:
1824 case VMMDevReq_CheckSharedModules:
1825 case VMMDevReq_GetPageSharingStatus:
1826 case VMMDevReq_DebugIsPageShared:
1827 case VMMDevReq_ReportGuestStats:
1828 case VMMDevReq_ReportGuestUserState:
1829 case VMMDevReq_GetStatisticsChangeRequest:
1830 case VMMDevReq_ChangeMemBalloon:
1831 enmRequired = kLevel_TrustedUsers;
1832 break;
1833
1834 /*
1835 * Anyone.
1836 */
1837 case VMMDevReq_GetMouseStatus:
1838 case VMMDevReq_SetMouseStatus:
1839 case VMMDevReq_SetPointerShape:
1840 case VMMDevReq_GetHostVersion:
1841 case VMMDevReq_Idle:
1842 case VMMDevReq_GetHostTime:
1843 case VMMDevReq_SetPowerStatus:
1844 case VMMDevReq_AcknowledgeEvents:
1845 case VMMDevReq_CtlGuestFilterMask:
1846 case VMMDevReq_ReportGuestStatus:
1847 case VMMDevReq_GetDisplayChangeRequest:
1848 case VMMDevReq_VideoModeSupported:
1849 case VMMDevReq_GetHeightReduction:
1850 case VMMDevReq_GetDisplayChangeRequest2:
1851 case VMMDevReq_VideoModeSupported2:
1852 case VMMDevReq_VideoAccelEnable:
1853 case VMMDevReq_VideoAccelFlush:
1854 case VMMDevReq_VideoSetVisibleRegion:
1855 case VMMDevReq_GetDisplayChangeRequestEx:
1856 case VMMDevReq_GetSeamlessChangeRequest:
1857 case VMMDevReq_GetVRDPChangeRequest:
1858 case VMMDevReq_LogString:
1859 case VMMDevReq_GetSessionId:
1860 enmRequired = kLevel_AllUsers;
1861 break;
1862
1863 /*
1864 * Depends on the request parameters...
1865 */
1866 /** @todo this has to be changed into an I/O control and the facilities
1867 * tracked in the session so they can automatically be failed when the
1868 * session terminates without reporting the new status.
1869 *
1870 * The information presented by IGuest is not reliable without this! */
1871 case VMMDevReq_ReportGuestCapabilities:
1872 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1873 {
1874 case VBoxGuestFacilityType_All:
1875 case VBoxGuestFacilityType_VBoxGuestDriver:
1876 enmRequired = kLevel_OnlyVBoxGuest;
1877 break;
1878 case VBoxGuestFacilityType_VBoxService:
1879 enmRequired = kLevel_TrustedUsers;
1880 break;
1881 case VBoxGuestFacilityType_VBoxTrayClient:
1882 case VBoxGuestFacilityType_Seamless:
1883 case VBoxGuestFacilityType_Graphics:
1884 default:
1885 enmRequired = kLevel_AllUsers;
1886 break;
1887 }
1888 break;
1889 }
1890
1891 /*
1892 * Check against the session.
1893 */
1894 switch (enmRequired)
1895 {
1896 default:
1897 case kLevel_NoOne:
1898 break;
1899 case kLevel_OnlyVBoxGuest:
1900 case kLevel_OnlyKernel:
1901 if (pSession->R0Process == NIL_RTR0PROCESS)
1902 return VINF_SUCCESS;
1903 break;
1904 case kLevel_TrustedUsers:
1905 case kLevel_AllUsers:
1906 return VINF_SUCCESS;
1907 }
1908
1909 return VERR_PERMISSION_DENIED;
1910}
1911
1912static int vbgdIoCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1913 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
1914{
1915 int rc;
1916 VMMDevRequestHeader *pReqCopy;
1917
1918 /*
1919 * Validate the header and request size.
1920 */
1921 const VMMDevRequestType enmType = pReqHdr->requestType;
1922 const uint32_t cbReq = pReqHdr->size;
1923 const uint32_t cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);
1924
1925 LogFlow(("VbgdCommonIoCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
1926
1927 if (cbReq < cbMinSize)
1928 {
1929 LogRel(("VbgdCommonIoCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
1930 cbReq, cbMinSize, enmType));
1931 return VERR_INVALID_PARAMETER;
1932 }
1933 if (cbReq > cbData)
1934 {
1935 LogRel(("VbgdCommonIoCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1936 cbData, cbReq, enmType));
1937 return VERR_INVALID_PARAMETER;
1938 }
1939 rc = VbglGRVerify(pReqHdr, cbData);
1940 if (RT_FAILURE(rc))
1941 {
1942 Log(("VbgdCommonIoCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
1943 cbData, cbReq, enmType, rc));
1944 return rc;
1945 }
1946
1947 rc = vbgdCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
1948 if (RT_FAILURE(rc))
1949 {
1950 Log(("VbgdCommonIoCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
1951 return rc;
1952 }
1953
1954 /*
1955 * Make a copy of the request in the physical memory heap so
1956 * the VBoxGuestLibrary can more easily deal with the request.
1957 * (This is really a waste of time since the OS or the OS specific
1958 * code has already buffered or locked the input/output buffer, but
1959 * it does make things a bit simpler wrt the phys address.)
1960 */
1961 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1962 if (RT_FAILURE(rc))
1963 {
1964 Log(("VbgdCommonIoCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1965 cbReq, cbReq, rc));
1966 return rc;
1967 }
1968 memcpy(pReqCopy, pReqHdr, cbReq);
1969
1970 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1971 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1972
1973 rc = VbglGRPerform(pReqCopy);
1974 if ( RT_SUCCESS(rc)
1975 && RT_SUCCESS(pReqCopy->rc))
1976 {
1977 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1978 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1979
1980 memcpy(pReqHdr, pReqCopy, cbReq);
1981 if (pcbDataReturned)
1982 *pcbDataReturned = cbReq;
1983 }
1984 else if (RT_FAILURE(rc))
1985 Log(("VbgdCommonIoCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1986 else
1987 {
1988 Log(("VbgdCommonIoCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1989 rc = pReqCopy->rc;
1990 }
1991
1992 VbglGRFree(pReqCopy);
1993 return rc;
1994}
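
/*
 * A minimal ring-3 sketch of the VMMREQUEST path above: the caller builds a
 * complete VMMDev request (header included) in its own buffer and hands it in
 * through the size-carrying VBOXGUEST_IOCTL_VMMREQUEST code. VMMDevReqHostTime
 * and its 'time' field are assumed from VMMDev.h; fdVBoxGuest and useHostTime
 * are hypothetical.
 *
 * @code
 *    VMMDevReqHostTime Req;
 *    RT_ZERO(Req);
 *    Req.header.size        = sizeof(Req);
 *    Req.header.version     = VMMDEV_REQUEST_HEADER_VERSION;
 *    Req.header.requestType = VMMDevReq_GetHostTime;             // Allowed for all users, see the table above.
 *    Req.header.rc          = VERR_GENERAL_FAILURE;              // Overwritten by the host on success.
 *    if (   ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_VMMREQUEST(sizeof(Req)), &Req) == 0
 *        && RT_SUCCESS(Req.header.rc))
 *        useHostTime(Req.time);                                  // Host time (see VMMDev.h for the exact unit).
 * @endcode
 */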
1995
1996
1997#ifdef VBOX_WITH_HGCM
1998
1999AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2000
2001/** Worker for vbgdHgcmAsyncWaitCallback*. */
2002static int vbgdHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
2003 bool fInterruptible, uint32_t cMillies)
2004{
2005 int rc;
2006
2007 /*
2008 * Check to see if the condition was met by the time we got here.
2009 *
2010 * We create a simple poll loop here for dealing with out-of-memory
2011 * conditions since the caller isn't necessarily able to deal with
2012 * us returning too early.
2013 */
2014 PVBOXGUESTWAIT pWait;
2015 for (;;)
2016 {
2017 RTSpinlockAcquire(pDevExt->EventSpinlock);
2018 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2019 {
2020 RTSpinlockRelease(pDevExt->EventSpinlock);
2021 return VINF_SUCCESS;
2022 }
2023 RTSpinlockRelease(pDevExt->EventSpinlock);
2024
2025 pWait = vbgdWaitAlloc(pDevExt, NULL);
2026 if (pWait)
2027 break;
2028 if (fInterruptible)
2029 return VERR_INTERRUPTED;
2030 RTThreadSleep(1);
2031 }
2032 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
2033 pWait->pHGCMReq = pHdr;
2034
2035 /*
2036 * Re-enter the spinlock and re-check for the condition.
2037 * If the condition is met, return.
2038 * Otherwise link us into the HGCM wait list and go to sleep.
2039 */
2040 RTSpinlockAcquire(pDevExt->EventSpinlock);
2041 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
2042 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2043 {
2044 vbgdWaitFreeLocked(pDevExt, pWait);
2045 RTSpinlockRelease(pDevExt->EventSpinlock);
2046 return VINF_SUCCESS;
2047 }
2048 RTSpinlockRelease(pDevExt->EventSpinlock);
2049
2050 if (fInterruptible)
2051 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
2052 else
2053 rc = RTSemEventMultiWait(pWait->Event, cMillies);
2054 if (rc == VERR_SEM_DESTROYED)
2055 return rc;
2056
2057 /*
2058 * Unlink, free and return.
2059 */
2060 if ( RT_FAILURE(rc)
2061 && rc != VERR_TIMEOUT
2062 && ( !fInterruptible
2063 || rc != VERR_INTERRUPTED))
2064 LogRel(("vbgdHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));
2065
2066 vbgdWaitFreeUnlocked(pDevExt, pWait);
2067 return rc;
2068}
2069
2070
2071/**
2072 * This is a callback for dealing with async waits.
2073 *
2074 * It operates in a manner similar to vbgdIoCtl_WaitEvent.
2075 */
2076static DECLCALLBACK(int) vbgdHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2077{
2078 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2079 LogFlow(("vbgdHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2080 return vbgdHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2081 false /* fInterruptible */, u32User /* cMillies */);
2082}
2083
2084
2085/**
2086 * This is a callback for dealing with async waits with a timeout.
2087 *
2088 * It operates in a manner similar to vbgdIoCtl_WaitEvent.
2089 */
2090static DECLCALLBACK(int) vbgdHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2091{
2092 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2093 LogFlow(("vbgdHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2094 return vbgdHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2095 true /* fInterruptible */, u32User /* cMillies */);
2096}
2097
2098
2099static int vbgdIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2100 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
2101{
2102 int rc;
2103
2104 /*
2105 * The VbglHGCMConnect call will invoke the callback if the HGCM
2106 * call is performed in an ASYNC fashion. The function is not able
2107 * to deal with cancelled requests.
2108 */
2109 Log(("VbgdCommonIoCtl: HGCM_CONNECT: %.128s\n",
2110 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
2111 ? pInfo->Loc.u.host.achName : "<not local host>"));
2112
2113 rc = VbglR0HGCMInternalConnect(pInfo, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2114 if (RT_SUCCESS(rc))
2115 {
2116 Log(("VbgdCommonIoCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
2117 pInfo->u32ClientID, pInfo->result, rc));
2118 if (RT_SUCCESS(pInfo->result))
2119 {
2120 /*
2121 * Append the client id to the client id table.
2122 * If the table has somehow become filled up, we'll disconnect the session.
2123 */
2124 unsigned i;
2125 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2126 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2127 if (!pSession->aHGCMClientIds[i])
2128 {
2129 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
2130 break;
2131 }
2132 RTSpinlockRelease(pDevExt->SessionSpinlock);
2133 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2134 {
2135 VBoxGuestHGCMDisconnectInfo Info;
2136 LogRelMax(32, ("VbgdCommonIoCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
2137 Info.result = 0;
2138 Info.u32ClientID = pInfo->u32ClientID;
2139 VbglR0HGCMInternalDisconnect(&Info, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2140 return VERR_TOO_MANY_OPEN_FILES;
2141 }
2142 }
2143 else
2144 rc = pInfo->result;
2145 if (pcbDataReturned)
2146 *pcbDataReturned = sizeof(*pInfo);
2147 }
2148 return rc;
2149}
2150
2151
2152static int vbgdIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2153 VBoxGuestHGCMDisconnectInfo *pInfo, size_t *pcbDataReturned)
2154{
2155 /*
2156 * Validate the client id and invalidate its entry while we're in the call.
2157 */
2158 int rc;
2159 const uint32_t u32ClientId = pInfo->u32ClientID;
2160 unsigned i;
2161 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2162 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2163 if (pSession->aHGCMClientIds[i] == u32ClientId)
2164 {
2165 pSession->aHGCMClientIds[i] = UINT32_MAX;
2166 break;
2167 }
2168 RTSpinlockRelease(pDevExt->SessionSpinlock);
2169 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2170 {
2171 LogRelMax(32, ("VbgdCommonIoCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
2172 return VERR_INVALID_HANDLE;
2173 }
2174
2175 /*
2176 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
2177 * call is performed in an ASYNC fashion. The function is not able
2178 * to deal with cancelled requests.
2179 */
2180 Log(("VbgdCommonIoCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
2181 rc = VbglR0HGCMInternalDisconnect(pInfo, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2182 if (RT_SUCCESS(rc))
2183 {
2184 LogFlow(("VbgdCommonIoCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
2185 if (pcbDataReturned)
2186 *pcbDataReturned = sizeof(*pInfo);
2187 }
2188
2189 /* Update the client id array according to the result. */
2190 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2191 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2192 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
2193 RTSpinlockRelease(pDevExt->SessionSpinlock);
2194
2195 return rc;
2196}
2197
2198
2199static int vbgdIoCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfo *pInfo,
2200 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2201 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2202{
2203 const uint32_t u32ClientId = pInfo->u32ClientID;
2204 uint32_t fFlags;
2205 size_t cbActual;
2206 unsigned i;
2207 int rc;
2208
2209 /*
2210 * Some more validations.
2211 */
2212 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2213 {
2214 LogRel(("VbgdCommonIoCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
2215 return VERR_INVALID_PARAMETER;
2216 }
2217
2218 cbActual = cbExtra + sizeof(*pInfo);
2219#ifdef RT_ARCH_AMD64
2220 if (f32bit)
2221 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2222 else
2223#endif
2224 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2225 if (cbData < cbActual)
2226 {
2227 LogRel(("VbgdCommonIoCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2228 cbData, cbData, cbActual, cbActual));
2229 return VERR_INVALID_PARAMETER;
2230 }
2231
2232 /*
2233 * Validate the client id.
2234 */
2235 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2236 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2237 if (pSession->aHGCMClientIds[i] == u32ClientId)
2238 break;
2239 RTSpinlockRelease(pDevExt->SessionSpinlock);
2240 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2241 {
2242 LogRelMax(32, ("VbgdCommonIoCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2243 return VERR_INVALID_HANDLE;
2244 }
2245
2246 /*
2247 * The VbglHGCMCall call will invoke the callback if the HGCM
2248 * call is performed in an ASYNC fashion. This function can
2249 * deal with cancelled requests, so we let user-mode requests
2250 * be interruptible (should add a flag for this later I guess).
2251 */
2252 LogFlow(("VbgdCommonIoCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2253 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2254 uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
2255#ifdef RT_ARCH_AMD64
2256 if (f32bit)
2257 {
2258 if (fInterruptible)
2259 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2260 else
2261 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallback, pDevExt, cMillies);
2262 }
2263 else
2264#endif
2265 {
2266 if (fInterruptible)
2267 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2268 else
2269 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallback, pDevExt, cMillies);
2270 }
2271 if (RT_SUCCESS(rc))
2272 {
2273 LogFlow(("VbgdCommonIoCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2274 if (pcbDataReturned)
2275 *pcbDataReturned = cbActual;
2276 }
2277 else
2278 {
2279 if ( rc != VERR_INTERRUPTED
2280 && rc != VERR_TIMEOUT)
2281 LogRelMax(32, ("VbgdCommonIoCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2282 else
2283 Log(("VbgdCommonIoCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2284 }
2285 return rc;
2286}
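
/*
 * A sketch of the buffer layout the HGCM_CALL validation above expects: a
 * VBoxGuestHGCMCallInfo header followed directly in memory by cParms
 * HGCMFunctionParameter entries, so the required size is
 * sizeof(VBoxGuestHGCMCallInfo) + cParms * sizeof(HGCMFunctionParameter).
 * The parameter field names (type, u.value32) are assumed from the HGCM
 * headers; idClient and uFunction are hypothetical values.
 *
 * @code
 *    union
 *    {
 *        VBoxGuestHGCMCallInfo Hdr;
 *        uint8_t               ab[sizeof(VBoxGuestHGCMCallInfo) + 2 * sizeof(HGCMFunctionParameter)];
 *    } Buf;
 *    HGCMFunctionParameter *paParms = (HGCMFunctionParameter *)(&Buf.Hdr + 1);
 *
 *    RT_ZERO(Buf);
 *    Buf.Hdr.u32ClientID  = idClient;                 // From a prior HGCM_CONNECT call.
 *    Buf.Hdr.u32Function  = uFunction;                // Service specific function number.
 *    Buf.Hdr.cParms       = 2;
 *    paParms[0].type      = VMMDevHGCMParmType_32bit;
 *    paParms[0].u.value32 = 42;
 *    paParms[1].type      = VMMDevHGCMParmType_32bit;
 *    paParms[1].u.value32 = 0;
 *    // Hand &Buf.Hdr to the driver via VBOXGUEST_IOCTL_HGCM_CALL(sizeof(Buf)).
 * @endcode
 */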
2287
2288#endif /* VBOX_WITH_HGCM */
2289
2290/**
2291 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2292 *
2293 * Ask the host for the size of the balloon and try to set it accordingly. If
2294 * this approach fails because it's not supported, return with fHandleInR3 set
2295 * and let user-land code supply memory we can lock via the other ioctl.
2296 *
2297 * @returns VBox status code.
2298 *
2299 * @param pDevExt The device extension.
2300 * @param pSession The session.
2301 * @param pInfo The output buffer.
2302 * @param pcbDataReturned Where to store the amount of returned data. Can
2303 * be NULL.
2304 */
2305static int vbgdIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2306 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2307{
2308 VMMDevGetMemBalloonChangeRequest *pReq;
2309 int rc;
2310
2311 LogFlow(("VbgdCommonIoCtl: CHECK_MEMORY_BALLOON\n"));
2312 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2313 AssertRCReturn(rc, rc);
2314
2315 /*
2316 * The first user trying to query/change the balloon becomes the
2317 * owner and owns it until the session is closed (vbgdCloseMemBalloon).
2318 */
2319 if ( pDevExt->MemBalloon.pOwner != pSession
2320 && pDevExt->MemBalloon.pOwner == NULL)
2321 pDevExt->MemBalloon.pOwner = pSession;
2322
2323 if (pDevExt->MemBalloon.pOwner == pSession)
2324 {
2325 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2326 if (RT_SUCCESS(rc))
2327 {
2328 /*
2329 * This is a response to the VMMDEV_EVENT_BALLOON_CHANGE_REQUEST event.
2330 * Acknowledging it means that we request the balloon size from the host
2331 * and change the guest memory balloon according to that value.
2332 */
2333 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2334 rc = VbglGRPerform(&pReq->header);
2335 if (RT_SUCCESS(rc))
2336 {
2337 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2338 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2339
2340 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2341 pInfo->fHandleInR3 = false;
2342
2343 rc = vbgdSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2344 /* Ignore various out of memory failures. */
2345 if ( rc == VERR_NO_MEMORY
2346 || rc == VERR_NO_PHYS_MEMORY
2347 || rc == VERR_NO_CONT_MEMORY)
2348 rc = VINF_SUCCESS;
2349
2350 if (pcbDataReturned)
2351 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2352 }
2353 else
2354 LogRel(("VbgdCommonIoCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2355 VbglGRFree(&pReq->header);
2356 }
2357 }
2358 else
2359 rc = VERR_PERMISSION_DENIED;
2360
2361 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2362 LogFlow(("VbgdCommonIoCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2363 return rc;
2364}
2365
2366
2367/**
2368 * Handle a request for changing the memory balloon.
2369 *
2370 * @returns VBox status code.
2371 *
2372 * @param pDevExt The device extension.
2373 * @param pSession The session.
2374 * @param pInfo The change request structure (input).
2375 * @param pcbDataReturned Where to store the amount of returned data. Can
2376 * be NULL.
2377 */
2378static int vbgdIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2379 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2380{
2381 int rc;
2382
2383 LogFlow(("VbgdCommonIoCtl: CHANGE_BALLOON\n"));
2384 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2385 AssertRCReturn(rc, rc);
2386
2387 if (!pDevExt->MemBalloon.fUseKernelAPI)
2388 {
2389 /*
2390 * The first user trying to query/change the balloon becomes the
2391 * owner and owns it until the session is closed (vbgdCloseMemBalloon).
2392 */
2393 if ( pDevExt->MemBalloon.pOwner != pSession
2394 && pDevExt->MemBalloon.pOwner == NULL)
2395 pDevExt->MemBalloon.pOwner = pSession;
2396
2397 if (pDevExt->MemBalloon.pOwner == pSession)
2398 {
2399 rc = vbgdSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2400 if (pcbDataReturned)
2401 *pcbDataReturned = 0;
2402 }
2403 else
2404 rc = VERR_PERMISSION_DENIED;
2405 }
2406 else
2407 rc = VERR_PERMISSION_DENIED;
2408
2409 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2410 return rc;
2411}
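
/*
 * A hedged ring-3 sketch of the balloon handshake served by the two handlers
 * above: query the target size first and, only if the kernel cannot handle it
 * itself (fHandleInR3), lock user memory chunk by chunk via the change ioctl.
 * VBOXGUEST_IOCTL_CHANGE_BALLOON, cbChunk, allocChunk and fdVBoxGuest are
 * assumptions/hypothetical helpers rather than confirmed API names.
 *
 * @code
 *    VBoxGuestCheckBalloonInfo Check;
 *    RT_ZERO(Check);
 *    if (   ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_CHECK_BALLOON, &Check) == 0
 *        && Check.fHandleInR3)
 *    {
 *        // The kernel API path was unavailable, so supply lockable ring-3 memory.
 *        for (uint32_t i = 0; i < Check.cBalloonChunks; i++)
 *        {
 *            VBoxGuestChangeBalloonInfo Change;
 *            RT_ZERO(Change);
 *            Change.u64ChunkAddr = (uint64_t)(uintptr_t)allocChunk(cbChunk);
 *            Change.fInflate     = 1;
 *            ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_CHANGE_BALLOON, &Change);
 *        }
 *    }
 * @endcode
 */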
2412
2413
2414/**
2415 * Handle a request for writing a core dump of the guest on the host.
2416 *
2417 * @returns VBox status code.
2418 *
2419 * @param pDevExt The device extension.
2420 * @param pInfo The output buffer.
2421 */
2422static int vbgdIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2423{
2424 VMMDevReqWriteCoreDump *pReq = NULL;
2425 int rc;
2426
2427 LogFlow(("VbgdCommonIoCtl: WRITE_CORE_DUMP\n"));
2428 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2429 if (RT_FAILURE(rc))
2430 {
2431 Log(("VbgdCommonIoCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2432 sizeof(*pReq), sizeof(*pReq), rc));
2433 return rc;
2434 }
2435
2436 pReq->fFlags = pInfo->fFlags;
2437 rc = VbglGRPerform(&pReq->header);
2438 if (RT_FAILURE(rc))
2439 Log(("VbgdCommonIoCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2440
2441 VbglGRFree(&pReq->header);
2442 return rc;
2443}
2444
2445
2446/**
2447 * Guest backdoor logging.
2448 *
2449 * @returns VBox status code.
2450 *
2451 * @param pDevExt The device extension.
2452 * @param pch The log message (need not be NULL terminated).
2453 * @param cbData Size of the buffer.
2454 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param fUserSession Set if the call originates from a regular user session;
 * this restricts the fallback logging to the debug log.
2455 */
2456static int vbgdIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned, bool fUserSession)
2457{
2458 NOREF(pch);
2459 NOREF(cbData);
2460 if (pDevExt->fLoggingEnabled)
2461 RTLogBackdoorPrintf("%.*s", cbData, pch);
2462 else if (!fUserSession)
2463 LogRel(("%.*s", cbData, pch));
2464 else
2465 Log(("%.*s", cbData, pch));
2466 if (pcbDataReturned)
2467 *pcbDataReturned = 0;
2468 return VINF_SUCCESS;
2469}
2470
2471
2472/** @name Guest Capabilities, Mouse Status and Event Filter
2473 * @{
2474 */
2475
2476/**
2477 * Clears a bit usage tracker (init time).
2478 *
2479 * @param pTracker The tracker to clear.
2480 */
2481static void vbgdBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
2482{
2483 uint32_t iBit;
2484 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2485
2486 for (iBit = 0; iBit < 32; iBit++)
2487 pTracker->acPerBitUsage[iBit] = 0;
2488 pTracker->fMask = 0;
2489}
2490
2491
2492#ifdef VBOX_STRICT
2493/**
2494 * Checks that pTracker->fMask is correct and that the usage values are within
2495 * the valid range.
2496 *
2497 * @param pTracker The tracker.
2498 * @param cMax Max valid usage value.
2499 * @param pszWhat Identifies the tracker in assertions.
2500 */
2501static void vbgdBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
2502{
2503 uint32_t fMask = 0;
2504 uint32_t iBit;
2505 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2506
2507 for (iBit = 0; iBit < 32; iBit++)
2508 if (pTracker->acPerBitUsage[iBit])
2509 {
2510 fMask |= RT_BIT_32(iBit);
2511 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2512 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2513 }
2514
2515 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
2516}
2517#endif
2518
2519
2520/**
2521 * Applies a change to the bit usage tracker.
2522 *
2523 *
2525 * @param pTracker The bit usage tracker.
2526 * @param fChanged The bits to change.
2527 * @param fPrevious The previous value of the bits.
2528 * @param cMax The max valid usage value for assertions.
2529 * @param pszWhat Identifies the tracker in assertions.
2530 */
2531static bool vbgdBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
2532 uint32_t cMax, const char *pszWhat)
2533{
2534 bool fGlobalChange = false;
2535 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2536
2537 while (fChanged)
2538 {
2539 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
2540 uint32_t const fBitMask = RT_BIT_32(iBit);
2541 Assert(iBit < 32); Assert(fBitMask & fChanged);
2542
2543 if (fBitMask & fPrevious)
2544 {
2545 pTracker->acPerBitUsage[iBit] -= 1;
2546 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2547 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2548 if (pTracker->acPerBitUsage[iBit] == 0)
2549 {
2550 fGlobalChange = true;
2551 pTracker->fMask &= ~fBitMask;
2552 }
2553 }
2554 else
2555 {
2556 pTracker->acPerBitUsage[iBit] += 1;
2557 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
2558 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2559 if (pTracker->acPerBitUsage[iBit] == 1)
2560 {
2561 fGlobalChange = true;
2562 pTracker->fMask |= fBitMask;
2563 }
2564 }
2565
2566 fChanged &= ~fBitMask;
2567 }
2568
2569#ifdef VBOX_STRICT
2570 vbgdBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
2571#endif
2572 NOREF(pszWhat); NOREF(cMax);
2573 return fGlobalChange;
2574}
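
/*
 * A small worked example of the tracker semantics implemented above: the
 * global mask only flips when a bit's usage count goes 0 -> 1 or 1 -> 0, so
 * two sessions enabling the same bit produce exactly one global change.
 *
 * @code
 *    VBOXGUESTBITUSAGETRACER Tracker;
 *    vbgdBitUsageTrackerClear(&Tracker);
 *
 *    // Session A sets bit 3 (the bit was previously clear in its mask).
 *    bool f1 = vbgdBitUsageTrackerChange(&Tracker, RT_BIT_32(3), 0, 2, "example");             // true,  fMask = 0x8
 *    // Session B sets the same bit: usage 1 -> 2, global mask unchanged.
 *    bool f2 = vbgdBitUsageTrackerChange(&Tracker, RT_BIT_32(3), 0, 2, "example");             // false, fMask = 0x8
 *    // Session A clears it again: usage 2 -> 1, the bit stays set globally.
 *    bool f3 = vbgdBitUsageTrackerChange(&Tracker, RT_BIT_32(3), RT_BIT_32(3), 2, "example");  // false, fMask = 0x8
 * @endcode
 */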
2575
2576
2577/**
2578 * Init and termination worker for resetting the event filter on the host.
2579 *
2580 * @returns VBox status code.
2581 * @param pDevExt The device extension.
2582 * @param fFixedEvents Fixed events (init time).
2583 */
2584static int vbgdResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
2585{
2586 VMMDevCtlGuestFilterMask *pReq;
2587 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2588 if (RT_SUCCESS(rc))
2589 {
2590 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
2591 pReq->u32OrMask = fFixedEvents;
2592 rc = VbglGRPerform(&pReq->header);
2593 if (RT_FAILURE(rc))
2594 LogRelFunc(("failed with rc=%Rrc\n", rc));
2595 VbglGRFree(&pReq->header);
2596 }
2597 return rc;
2598}
2599
2600
2601/**
2602 * Changes the event filter mask for the given session.
2603 *
2604 * This is called in response to VBOXGUEST_IOCTL_CTL_FILTER_MASK as well as to
2605 * do session cleanup.
2606 *
2607 * @returns VBox status code.
2608 * @param pDevExt The device extension.
2609 * @param pSession The session.
2610 * @param fOrMask The events to add.
2611 * @param fNotMask The events to remove.
2612 * @param fSessionTermination Set if we're called by the session cleanup code.
2613 * This tweaks the error handling so we perform
2614 * proper session cleanup even if the host
2615 * misbehaves.
2616 *
2617 * @remarks Takes the session spinlock.
2618 */
2619static int vbgdSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2620 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2621{
2622 VMMDevCtlGuestFilterMask *pReq;
2623 uint32_t fChanged;
2624 uint32_t fPrevious;
2625 int rc;
2626
2627 /*
2628 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2629 */
2630 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2631 if (RT_SUCCESS(rc))
2632 { /* nothing */ }
2633 else if (!fSessionTermination)
2634 {
2635 LogRel(("vbgdSetSessionFilterMask: VbglGRAlloc failure: %Rrc\n", rc));
2636 return rc;
2637 }
2638 else
2639 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2640
2641
2642 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2643
2644 /*
2645 * Apply the changes to the session mask.
2646 */
2647 fPrevious = pSession->fEventFilter;
2648 pSession->fEventFilter |= fOrMask;
2649 pSession->fEventFilter &= fNotMask;
2650
2651 /*
2652 * If anything actually changed, update the global usage counters.
2653 */
2654 fChanged = fPrevious ^ pSession->fEventFilter;
2655 if (fChanged)
2656 {
2657 bool fGlobalChange = vbgdBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
2658 pDevExt->cSessions, "EventFilterTracker");
2659
2660 /*
2661 * If there are global changes, update the event filter on the host.
2662 */
2663 if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
2664 {
2665 Assert(pReq || fSessionTermination);
2666 if (pReq)
2667 {
2668 pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
2669 if (pReq->u32OrMask == pDevExt->fEventFilterHost)
2670 rc = VINF_SUCCESS;
2671 else
2672 {
2673 pDevExt->fEventFilterHost = pReq->u32OrMask;
2674 pReq->u32NotMask = ~pReq->u32OrMask;
2675 rc = VbglGRPerform(&pReq->header);
2676 if (RT_FAILURE(rc))
2677 {
2678 /*
2679 * Failed, roll back (unless it's session termination time).
2680 */
2681 pDevExt->fEventFilterHost = UINT32_MAX;
2682 if (!fSessionTermination)
2683 {
2684 vbgdBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
2685 pDevExt->cSessions, "EventFilterTracker");
2686 pSession->fEventFilter = fPrevious;
2687 }
2688 }
2689 }
2690 }
2691 else
2692 rc = VINF_SUCCESS;
2693 }
2694 }
2695
2696 RTSpinlockRelease(pDevExt->SessionSpinlock);
2697 if (pReq)
2698 VbglGRFree(&pReq->header);
2699 return rc;
2700}
2701
2702
2703/**
2704 * Handle VBOXGUEST_IOCTL_CTL_FILTER_MASK.
2705 *
2706 * @returns VBox status code.
2707 *
2708 * @param pDevExt The device extension.
2709 * @param pSession The session.
2710 * @param pInfo The request.
2711 */
2712static int vbgdIoCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestFilterMaskInfo *pInfo)
2713{
2714 LogFlow(("VbgdCommonIoCtl: CTL_FILTER_MASK or=%#x not=%#x\n", pInfo->u32OrMask, pInfo->u32NotMask));
2715
2716 if ((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
2717 {
2718 Log(("VbgdCommonIoCtl: CTL_FILTER_MASK or=%#x not=%#x: Invalid masks!\n", pInfo->u32OrMask, pInfo->u32NotMask));
2719 return VERR_INVALID_PARAMETER;
2720 }
2721
2722 return vbgdSetSessionEventFilter(pDevExt, pSession, pInfo->u32OrMask, pInfo->u32NotMask, false /*fSessionTermination*/);
2723}
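
/*
 * A minimal ring-3 sketch for the filter mask ioctl handled above: a session
 * adds the events it wants delivered via u32OrMask and drops others via
 * u32NotMask, and the driver merges all session masks before updating the
 * host. fdVBoxGuest is a hypothetical device handle.
 *
 * @code
 *    VBoxGuestFilterMaskInfo Info;
 *    RT_ZERO(Info);
 *    Info.u32OrMask  = VMMDEV_EVENT_MOUSE_POSITION_CHANGED;  // Start receiving mouse position events.
 *    Info.u32NotMask = 0;                                    // Leave the rest of this session's mask alone.
 *    ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_CTL_FILTER_MASK, &Info);
 * @endcode
 */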
2724
2725
2726/**
2727 * Init and termination worker for setting the mouse feature status to zero on the host.
2728 *
2729 * @returns VBox status code.
2730 * @param pDevExt The device extension.
2731 */
2732static int vbgdResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
2733{
2734 VMMDevReqMouseStatus *pReq;
2735 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2736 if (RT_SUCCESS(rc))
2737 {
2738 pReq->mouseFeatures = 0;
2739 pReq->pointerXPos = 0;
2740 pReq->pointerYPos = 0;
2741 rc = VbglGRPerform(&pReq->header);
2742 if (RT_FAILURE(rc))
2743 LogRelFunc(("failed with rc=%Rrc\n", rc));
2744 VbglGRFree(&pReq->header);
2745 }
2746 return rc;
2747}
2748
2749
2750/**
2751 * Changes the mouse status mask for the given session.
2752 *
2753 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
2754 * do session cleanup.
2755 *
2756 * @returns VBox status code.
2757 * @param pDevExt The device extension.
2758 * @param pSession The session.
2759 * @param fOrMask The status flags to add.
2760 * @param fNotMask The status flags to remove.
2761 * @param fSessionTermination Set if we're called by the session cleanup code.
2762 * This tweaks the error handling so we perform
2763 * proper session cleanup even if the host
2764 * misbehaves.
2765 *
2766 * @remarks Takes the session spinlock.
2767 */
2768static int vbgdSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2769 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2770{
2771 VMMDevReqMouseStatus *pReq;
2772 uint32_t fChanged;
2773 uint32_t fPrevious;
2774 int rc;
2775
2776 /*
2777 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2778 */
2779 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2780 if (RT_SUCCESS(rc))
2781 { /* nothing */ }
2782 else if (!fSessionTermination)
2783 {
2784 LogRel(("vbgdSetSessionMouseStatus: VbglGRAlloc failure: %Rrc\n", rc));
2785 return rc;
2786 }
2787 else
2788 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2789
2790
2791 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2792
2793 /*
2794 * Apply the changes to the session mask.
2795 */
2796 fPrevious = pSession->fMouseStatus;
2797 pSession->fMouseStatus |= fOrMask;
2798 pSession->fMouseStatus &= fNotMask;
2799
2800 /*
2801 * If anything actually changed, update the global usage counters.
2802 */
2803 fChanged = fPrevious ^ pSession->fMouseStatus;
2804 if (fChanged)
2805 {
2806 bool fGlobalChange = vbgdBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
2807 pDevExt->cSessions, "MouseStatusTracker");
2808
2809 /*
2810 * If there are global changes, update the mouse status on the host.
2811 */
2812 if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
2813 {
2814 Assert(pReq || fSessionTermination);
2815 if (pReq)
2816 {
2817 pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
2818 if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
2819 rc = VINF_SUCCESS;
2820 else
2821 {
2822 pDevExt->fMouseStatusHost = pReq->mouseFeatures;
2823 pReq->pointerXPos = 0;
2824 pReq->pointerYPos = 0;
2825 rc = VbglGRPerform(&pReq->header);
2826 if (RT_FAILURE(rc))
2827 {
2828 /*
2829 * Failed, roll back (unless it's session termination time).
2830 */
2831 pDevExt->fMouseStatusHost = UINT32_MAX;
2832 if (!fSessionTermination)
2833 {
2834 vbgdBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
2835 pDevExt->cSessions, "MouseStatusTracker");
2836 pSession->fMouseStatus = fPrevious;
2837 }
2838 }
2839 }
2840 }
2841 else
2842 rc = VINF_SUCCESS;
2843 }
2844 }
2845
2846 RTSpinlockRelease(pDevExt->SessionSpinlock);
2847 if (pReq)
2848 VbglGRFree(&pReq->header);
2849 return rc;
2850}
2851
2852
2853/**
2854 * Sets the mouse status features for this session and updates them globally.
2855 *
2856 * @returns VBox status code.
2857 *
2858 * @param pDevExt The device extension.
2859 * @param pSession The session.
2860 * @param fFeatures New bitmap of enabled features.
2861 */
2862static int vbgdIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2863{
2864 int rc;
2865 LogFlow(("VbgdCommonIoCtl: SET_MOUSE_STATUS features=%#x\n", fFeatures));
2866
2867 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
2868 return VERR_INVALID_PARAMETER;
2869
2870 return vbgdSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
2871}
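
/*
 * A short ring-3 sketch for the mouse status ioctl above; the payload is just
 * the 32-bit feature mask. VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE is assumed to be
 * one of the VMMDEV_MOUSE_GUEST_MASK bits, and fdVBoxGuest is hypothetical.
 *
 * @code
 *    uint32_t fFeatures = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
 *    ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &fFeatures);
 * @endcode
 */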
2872
2873
2874/**
2875 * Return the mask of VMM device events that this session is allowed to see (wrt
2876 * "acquire" mode guest capabilities).
2877 *
2878 * The events associated with guest capabilities in "acquire" mode will be
2879 * restricted to sessions which have acquired the respective capabilities.
2880 * If someone else tries to wait for acquired events, they won't be woken up
2881 * when the event becomes pending. Should some other thread in the session
2882 * acquire the capability while the corresponding event is pending, the waiting
2883 * thread will be woken up.
2884 *
2885 * @returns Mask of events valid for the given session.
2886 * @param pDevExt The device extension.
2887 * @param pSession The session.
2888 *
2889 * @remarks Only needs to be called when dispatching events in the
2890 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
2891 */
2892static uint32_t vbgdGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2893{
2894 uint32_t fAcquireModeGuestCaps;
2895 uint32_t fAcquiredGuestCaps;
2896 uint32_t fAllowedEvents;
2897
2898 /*
2899 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
2900 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
2901 */
2902 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
2903 if (fAcquireModeGuestCaps == 0)
2904 return VMMDEV_EVENT_VALID_EVENT_MASK;
2905 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
2906
2907 /*
2908 * Calculate which events to allow according to the cap config and caps
2909 * acquired by the session.
2910 */
2911 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
2912 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
2913 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
2914 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
2915
2916 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
2917 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
2918 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
2919
2920 return fAllowedEvents;
2921}
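
/*
 * A worked example of the masking above, assuming only the graphics capability
 * has been switched into "acquire" mode:
 *
 * @code
 *    // fAcquireModeGuestCaps == VMMDEV_GUEST_SUPPORTS_GRAPHICS
 *
 *    // A session that has NOT acquired the graphics capability:
 *    fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK & ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
 *
 *    // A session that HAS acquired it sees the unrestricted mask:
 *    fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
 * @endcode
 */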
2922
2923
2924/**
2925 * Init and termination worker for setting the guest capabilities to zero on the host.
2926 *
2927 * @returns VBox status code.
2928 * @param pDevExt The device extension.
2929 */
2930static int vbgdResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
2931{
2932 VMMDevReqGuestCapabilities2 *pReq;
2933 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
2934 if (RT_SUCCESS(rc))
2935 {
2936 pReq->u32NotMask = UINT32_MAX;
2937 pReq->u32OrMask = 0;
2938 rc = VbglGRPerform(&pReq->header);
2939
2940 if (RT_FAILURE(rc))
2941 LogRelFunc(("failed with rc=%Rrc\n", rc));
2942 VbglGRFree(&pReq->header);
2943 }
2944 return rc;
2945}
2946
2947
2948/**
2949 * Sets the guest capabilities to the host while holding the lock.
2950 *
2951 * This will ASSUME that we're the ones in charge of the mask, so
2952 * we'll simply clear all bits we don't set.
2953 *
2954 * @returns VBox status code.
2955 * @param pDevExt The device extension.
 * @param pReq The preallocated VMMDevReq_SetGuestCapabilities request to use.
2956 */
2957static int vbgdUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
2958{
2959 int rc;
2960 uint32_t iBit;
2961
2962 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
2963 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
2964 rc = VINF_SUCCESS;
2965 else
2966 {
2967 pDevExt->fGuestCapsHost = pReq->u32OrMask;
2968 pReq->u32NotMask = ~pReq->u32OrMask;
2969 rc = VbglGRPerform(&pReq->header);
2970 if (RT_FAILURE(rc))
2971 pDevExt->fGuestCapsHost = UINT32_MAX;
2972 }
2973
2974 return rc;
2975}
2976
2977
2978/**
2979 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
2980 * the given session.
2981 *
2982 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
2983 * to do session cleanup.
2984 *
2985 * @returns VBox status code.
2986 * @param pDevExt The device extension.
2987 * @param pSession The session.
2988 * @param fOrMask The capabilities to add.
2989 * @param fNotMask The capabilities to remove. Ignored in
2990 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
2991 * @param enmFlags Confusing operation modifier.
2992 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
2993 * configure and acquire/release the capabilities.
2994 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
2995 * means only configure capabilities in the
2996 * @a fOrMask capabilities for "acquire" mode.
2997 * @param fSessionTermination Set if we're called by the session cleanup code.
2998 * This tweaks the error handling so we perform
2999 * proper session cleanup even if the host
3000 * misbehaves.
3001 *
3002 * @remarks Takes both the session and event spinlocks.
3003 */
3004static int vbgdAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3005 uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags,
3006 bool fSessionTermination)
3007{
3008 uint32_t fCurrentOwnedCaps;
3009 uint32_t fSessionRemovedCaps;
3010 uint32_t fSessionAddedCaps;
3011 uint32_t fOtherConflictingCaps;
3012 VMMDevReqGuestCapabilities2 *pReq = NULL;
3013 int rc;
3014
3015
3016 /*
3017 * Validate and adjust input.
3018 */
3019 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3020 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3021 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3022 {
3023 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x -- invalid fOrMask\n",
3024 pSession, fOrMask, fNotMask, enmFlags));
3025 return VERR_INVALID_PARAMETER;
3026 }
3027
3028 if ( enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3029 && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
3030 {
3031 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: invalid enmFlags %d\n",
3032 pSession, fOrMask, fNotMask, enmFlags));
3033 return VERR_INVALID_PARAMETER;
3034 }
3035 Assert(!fOrMask || !fSessionTermination);
3036
3037 /* The fNotMask need not contain only valid bits; invalid ones will simply be ignored. */
3038 fNotMask &= ~fOrMask;
3039
3040 /*
3041 * Preallocate an update request if we're about to do more than just configure
3042 * the capability mode.
3043 */
3044 if (enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
3045 {
3046 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3047 if (RT_SUCCESS(rc))
3048 { /* do nothing */ }
3049 else if (!fSessionTermination)
3050 {
3051 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: VbglGRAlloc failure: %Rrc\n",
3052 pSession, fOrMask, fNotMask, enmFlags, rc));
3053 return rc;
3054 }
3055 else
3056 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3057 }
3058
3059 /*
3060 * Try to switch the capabilities in the OR mask into "acquire" mode.
3061 *
3062 * Note! We currently ignore anyone who may already have "set" the capabilities
3063 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3064 */
3065 RTSpinlockAcquire(pDevExt->EventSpinlock);
3066
3067 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3068 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3069 else
3070 {
3071 RTSpinlockRelease(pDevExt->EventSpinlock);
3072
3073 if (pReq)
3074 VbglGRFree(&pReq->header);
3075 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3076 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: calling caps acquire for set caps\n",
3077 pSession, fOrMask, fNotMask, enmFlags));
3078 return VERR_INVALID_STATE;
3079 }
3080
3081 /*
3082 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3083 */
3084 if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
3085 {
3086 RTSpinlockRelease(pDevExt->EventSpinlock);
3087
3088 Assert(!pReq);
3089 Log(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: configured acquire caps: 0x%x\n",
3090 pSession, fOrMask, fNotMask, enmFlags));
3091 return VINF_SUCCESS;
3092 }
3093 Assert(pReq || fSessionTermination);
3094
3095 /*
3096 * Caller wants to acquire/release the capabilities too.
3097 *
3098 * Note! The mode change of the capabilities above won't be reverted on
3099 * failure, this is intentional.
3100 */
3101 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3102 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3103 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3104 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3105 fOtherConflictingCaps &= fSessionAddedCaps;
3106
3107 if (!fOtherConflictingCaps)
3108 {
3109 if (fSessionAddedCaps)
3110 {
3111 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3112 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3113 }
3114
3115 if (fSessionRemovedCaps)
3116 {
3117 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3118 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3119 }
3120
3121 /*
3122 * If something changes (which is very likely), tell the host.
3123 */
3124 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3125 {
3126 Assert(pReq || fSessionTermination);
3127 if (pReq)
3128 {
3129 rc = vbgdUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3130 if (RT_FAILURE(rc) && !fSessionTermination)
3131 {
3132 /* Failed, roll back. */
3133 if (fSessionAddedCaps)
3134 {
3135 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3136 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3137 }
3138 if (fSessionRemovedCaps)
3139 {
3140 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3141 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3142 }
3143
3144 RTSpinlockRelease(pDevExt->EventSpinlock);
3145 LogRel(("vbgdAcquireSessionCapabilities: vbgdUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3146 VbglGRFree(&pReq->header);
3147 return rc;
3148 }
3149 }
3150 }
3151 }
3152 else
3153 {
3154 RTSpinlockRelease(pDevExt->EventSpinlock);
3155
3156 Log(("vbgdAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3157 VbglGRFree(&pReq->header);
3158 return VERR_RESOURCE_BUSY;
3159 }
3160
3161 RTSpinlockRelease(pDevExt->EventSpinlock);
3162 if (pReq)
3163 VbglGRFree(&pReq->header);
3164
3165 /*
3166 * If we added a capability, check if that means some other thread in our
3167 * session should be unblocked because there are events pending.
3168 *
3169 * HACK ALERT! When the seamless support capability is added we generate a
3170 * seamless change event so that the ring-3 client can sync with
3171 * the seamless state. Although this introduces spurious
3172 * wakeups of the ring-3 client, it solves the problem of client
3173 * state inconsistency in a multiuser environment (on Windows).
3174 */
3175 if (fSessionAddedCaps)
3176 {
3177 uint32_t fGenFakeEvents = 0;
3178 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3179 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3180
3181 RTSpinlockAcquire(pDevExt->EventSpinlock);
3182 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3183 vbgdDispatchEventsLocked(pDevExt, fGenFakeEvents);
3184 RTSpinlockRelease(pDevExt->EventSpinlock);
3185
3186#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3187 VbgdCommonWaitDoWakeUps(pDevExt);
3188#endif
3189 }
3190
3191 return VINF_SUCCESS;
3192}
3193
3194
3195/**
3196 * Handle VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE.
3197 *
3198 * @returns VBox status code.
3199 *
3200 * @param pDevExt The device extension.
3201 * @param pSession The session.
3202 * @param pAcquire The request.
3203 */
3204static int vbgdIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
3205{
3206 int rc;
3207 LogFlow(("VbgdCommonIoCtl: GUEST_CAPS_ACQUIRE or=%#x not=%#x flags=%#x\n",
3208 pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags));
3209
3210 rc = vbgdAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags,
3211 false /*fSessionTermination*/);
3212 if (RT_FAILURE(rc))
3213 LogRel(("VbgdCommonIoCtl: GUEST_CAPS_ACQUIRE failed rc=%Rrc\n", rc));
3214 pAcquire->rc = rc;
3215 return VINF_SUCCESS;
3216}
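
/*
 * A hedged ring-3 sketch for the acquire ioctl above: first switch the
 * seamless capability into "acquire" mode, then actually acquire it for this
 * session. Note that the handler reports the per-call status in the rc field
 * while the ioctl itself returns success. fdVBoxGuest is hypothetical.
 *
 * @code
 *    VBoxGuestCapsAquire Req;
 *    RT_ZERO(Req);
 *    Req.u32OrMask  = VMMDEV_GUEST_SUPPORTS_SEAMLESS;
 *    Req.u32NotMask = 0;
 *    Req.enmFlags   = VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE;   // Configure acquire mode only.
 *    ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE, &Req);
 *
 *    Req.enmFlags   = VBOXGUESTCAPSACQUIRE_FLAGS_NONE;                  // Now acquire the capability.
 *    if (   ioctl(fdVBoxGuest, VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE, &Req) == 0
 *        && RT_SUCCESS(Req.rc))
 *        ; // This session now owns the seamless capability.
 * @endcode
 */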
3217
3218
3219/**
3220 * Sets the guest capabilities for a session.
3221 *
3222 * @returns VBox status code.
3223 * @param pDevExt The device extension.
3224 * @param pSession The session.
3225 * @param fOrMask The capabilities to add.
3226 * @param fNotMask The capabilities to remove.
3227 * @param fSessionTermination Set if we're called by the session cleanup code.
3228 * This tweaks the error handling so we perform
3229 * proper session cleanup even if the host
3230 * misbehaves.
3231 *
3232 * @remarks Takes the session spinlock.
3233 */
3234static int vbgdSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3235 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
3236{
3237 /*
3238 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3239 */
3240 VMMDevReqGuestCapabilities2 *pReq;
3241 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3242 if (RT_SUCCESS(rc))
3243 { /* nothing */ }
3244 else if (!fSessionTermination)
3245 {
3246 LogRel(("vbgdSetSessionCapabilities: VbglGRAlloc failure: %Rrc\n", rc));
3247 return rc;
3248 }
3249 else
3250 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3251
3252
3253 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3254
3255#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3256 /*
3257 * Capabilities in "acquire" mode cannot be set via this API.
3258 * (Acquire mode is only used on Windows at the time of writing.)
3259 */
3260 if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
3261#endif
3262 {
3263 /*
3264 * Apply the changes to the session mask.
3265 */
3266 uint32_t fChanged;
3267 uint32_t fPrevious = pSession->fCapabilities;
3268 pSession->fCapabilities |= fOrMask;
3269 pSession->fCapabilities &= ~fNotMask;
3270
3271 /*
3272 * If anything actually changed, update the global usage counters.
3273 */
3274 fChanged = fPrevious ^ pSession->fCapabilities;
3275 if (fChanged)
3276 {
3277 bool fGlobalChange = vbgdBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
3278 pDevExt->cSessions, "SetGuestCapsTracker");
3279
3280 /*
3281 * If there are global changes, update the capabilities on the host.
3282 */
3283 if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
3284 {
3285 Assert(pReq || fSessionTermination);
3286 if (pReq)
3287 {
3288 rc = vbgdUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3289
3290 /* On failure, roll back (unless it's session termination time). */
3291 if (RT_FAILURE(rc) && !fSessionTermination)
3292 {
3293 vbgdBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
3294 pDevExt->cSessions, "SetGuestCapsTracker");
3295 pSession->fCapabilities = fPrevious;
3296 }
3297 }
3298 }
3299 }
3300 }
3301#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3302 else
3303 rc = VERR_RESOURCE_BUSY;
3304#endif
3305
3306 RTSpinlockRelease(pDevExt->SessionSpinlock);
3307 if (pReq)
3308 VbglGRFree(&pReq->header);
3309 return rc;
3310}
3311
3312
3313/**
3314 * Handle VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES.
3315 *
3316 * @returns VBox status code.
3317 *
3318 * @param pDevExt The device extension.
3319 * @param pSession The session.
3320 * @param pInfo The request.
3321 */
3322static int vbgdIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestSetCapabilitiesInfo *pInfo)
3323{
3324 int rc;
3325 LogFlow(("VbgdCommonIoCtl: SET_GUEST_CAPABILITIES or=%#x not=%#x\n", pInfo->u32OrMask, pInfo->u32NotMask));
3326
3327 if (!((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3328 rc = vbgdSetSessionCapabilities(pDevExt, pSession, pInfo->u32OrMask, pInfo->u32NotMask, false /*fSessionTermination*/);
3329 else
3330 rc = VERR_INVALID_PARAMETER;
3331
3332 return rc;
3333}
3334
3335/** @} */
3336
3337
3338/**
3339 * Common IOCtl for user to kernel and kernel to kernel communication.
3340 *
3341 * This function only does the basic validation and then invokes
3342 * worker functions that takes care of each specific function.
3343 *
3344 * @returns VBox status code.
3345 *
3346 * @param iFunction The requested function.
3347 * @param pDevExt The device extension.
3348 * @param pSession The client session.
3349 * @param pvData The input/output data buffer. Can be NULL depending on the function.
3350 * @param cbData The max size of the data buffer.
3351 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
3352 */
3353int VbgdCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3354 void *pvData, size_t cbData, size_t *pcbDataReturned)
3355{
3356 int rc;
3357 LogFlow(("VbgdCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
3358 iFunction, pDevExt, pSession, pvData, cbData));
3359
3360 /*
3361 * Make sure the returned data size is set to zero.
3362 */
3363 if (pcbDataReturned)
3364 *pcbDataReturned = 0;
3365
3366 /*
3367 * Define some helper macros to simplify validation.
3368 */
3369#define CHECKRET_RING0(mnemonic) \
3370 do { \
3371 if (pSession->R0Process != NIL_RTR0PROCESS) \
3372 { \
3373 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
3374 pSession->Process, (uintptr_t)pSession->R0Process)); \
3375 return VERR_PERMISSION_DENIED; \
3376 } \
3377 } while (0)
3378#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
3379 do { \
3380 if (cbData < (cbMin)) \
3381 { \
3382 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
3383 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
3384 return VERR_BUFFER_OVERFLOW; \
3385 } \
3386 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
3387 { \
3388 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
3389 return VERR_INVALID_POINTER; \
3390 } \
3391 } while (0)
3392#define CHECKRET_SIZE(mnemonic, cb) \
3393 do { \
3394 if (cbData != (cb)) \
3395 { \
3396 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
3397 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
3398 return VERR_BUFFER_OVERFLOW; \
3399 } \
3400 if ((cb) != 0 && !VALID_PTR(pvData)) \
3401 { \
3402 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
3403 return VERR_INVALID_POINTER; \
3404 } \
3405 } while (0)
3406
3407
3408 /*
3409 * Deal with variably sized requests first.
3410 */
3411 rc = VINF_SUCCESS;
3412 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
3413 {
3414 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
3415 rc = vbgdIoCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
3416 }
3417#ifdef VBOX_WITH_HGCM
3418 /*
3419 * These ones are a bit tricky.
3420     * These are a bit tricky.
3421 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
3422 {
3423 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
3424 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
3425 rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
3426 fInterruptible, false /*f32bit*/, false /* fUserData */,
3427 0, cbData, pcbDataReturned);
3428 }
3429 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
3430 {
3431 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
3432 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
3433 rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
3434 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
3435 false /*f32bit*/, false /* fUserData */,
3436 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
3437 }
3438 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
3439 {
3440 bool fInterruptible = true;
3441        CHECKRET_MIN_SIZE("HGCM_CALL_USERDATA", sizeof(VBoxGuestHGCMCallInfo));
3442 rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
3443 fInterruptible, false /*f32bit*/, true /* fUserData */,
3444 0, cbData, pcbDataReturned);
3445 }
3446# ifdef RT_ARCH_AMD64
3447 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
3448 {
3449 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
3450 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
3451 rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
3452 fInterruptible, true /*f32bit*/, false /* fUserData */,
3453 0, cbData, pcbDataReturned);
3454 }
3455 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
3456 {
3457 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
3458 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
3459 rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
3460 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
3461 true /*f32bit*/, false /* fUserData */,
3462 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
3463 }
3464# endif
3465#endif /* VBOX_WITH_HGCM */
3466 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
3467 {
3468 CHECKRET_MIN_SIZE("LOG", 1);
3469 rc = vbgdIoCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned, pSession->fUserSession);
3470 }
3471 else
3472 {
3473 switch (iFunction)
3474 {
3475 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
3476 CHECKRET_RING0("GETVMMDEVPORT");
3477 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
3478 rc = vbgdIoCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
3479 break;
3480
3481#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
3482 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
3483 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
3484 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
3485 rc = vbgdIoCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
3486 break;
3487#endif
3488
3489 case VBOXGUEST_IOCTL_WAITEVENT:
3490 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
3491 rc = vbgdIoCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
3492 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
3493 break;
3494
3495 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
3496                if (cbData != 0)
3497                    rc = VERR_INVALID_PARAMETER;
3498                else rc = vbgdIoCtl_CancelAllWaitEvents(pDevExt, pSession);
3499 break;
3500
3501 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
3502 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
3503 rc = vbgdIoCtl_CtlFilterMask(pDevExt, pSession, (VBoxGuestFilterMaskInfo *)pvData);
3504 break;
3505
3506#ifdef VBOX_WITH_HGCM
3507 case VBOXGUEST_IOCTL_HGCM_CONNECT:
3508# ifdef RT_ARCH_AMD64
3509 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
3510# endif
3511 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
3512 rc = vbgdIoCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
3513 break;
3514
3515 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
3516# ifdef RT_ARCH_AMD64
3517 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
3518# endif
3519 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
3520 rc = vbgdIoCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
3521 break;
3522#endif /* VBOX_WITH_HGCM */
3523
3524 case VBOXGUEST_IOCTL_CHECK_BALLOON:
3525 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
3526 rc = vbgdIoCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
3527 break;
3528
3529 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
3530 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
3531 rc = vbgdIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
3532 break;
3533
3534 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
3535 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
3536 rc = vbgdIoCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
3537 break;
3538
3539 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
3540 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
3541 rc = vbgdIoCtl_SetMouseStatus(pDevExt, pSession, *(uint32_t *)pvData);
3542 break;
3543
3544#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
3545 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
3546 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
3547 rc = VbgdNtIOCtl_DpcLatencyChecker();
3548 break;
3549#endif
3550
3551 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
3552 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
3553 rc = vbgdIoCtl_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire *)pvData);
3554                if (pcbDataReturned) *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
3555 break;
3556
3557 case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
3558 CHECKRET_MIN_SIZE("SET_GUEST_CAPABILITIES", sizeof(VBoxGuestSetCapabilitiesInfo));
3559 rc = vbgdIoCtl_SetCapabilities(pDevExt, pSession, (VBoxGuestSetCapabilitiesInfo *)pvData);
3560 break;
3561
3562 default:
3563 {
3564 LogRel(("VbgdCommonIoCtl: Unknown request iFunction=%#x stripped size=%#x\n",
3565 iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
3566 rc = VERR_NOT_SUPPORTED;
3567 break;
3568 }
3569 }
3570 }
3571
3572 LogFlow(("VbgdCommonIoCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
3573 return rc;
3574}
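/*
 * Illustrative sketch only (guarded by #if 0): the rough shape of a native
 * ioctl entry point wrapping VbgdCommonIoCtl.  The real per-OS glue lives in
 * the VBoxGuest-<os> sources; the buffer handling and error translation below
 * are placeholders, not actual driver code.
 */
#if 0
static int vbgdExampleNativeIoCtl(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                  unsigned iFunction, void *pvBuf, size_t cbBuf)
{
    size_t cbReturned = 0;
    int rc = VbgdCommonIoCtl(iFunction, pDevExt, pSession, pvBuf, cbBuf, &cbReturned);
    /* A real wrapper would copy cbReturned bytes back to the caller and convert
       the IPRT status code into the native kernel's error convention. */
    NOREF(cbReturned);
    return rc;
}
#endif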
3575
3576
3577/**
3578 * Used by VbgdCommonISR as well as the acquire guest capability code.
3579 *
3580 * @returns VINF_SUCCESS on success.  On failure, the RTSemEventMultiSignal
3581 *          errors ORed together (processing completes despite the errors).
3582 * @param pDevExt The VBoxGuest device extension.
3583 * @param fEvents The events to dispatch.
3584 */
3585static int vbgdDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
3586{
3587 PVBOXGUESTWAIT pWait;
3588 PVBOXGUESTWAIT pSafe;
3589 int rc = VINF_SUCCESS;
3590
3591 fEvents |= pDevExt->f32PendingEvents;
3592
3593 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
3594 {
3595 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
3596 if ( fHandledEvents != 0
3597 && !pWait->fResEvents)
3598 {
3599 /* Does this one wait on any of the events we're dispatching? We do a quick
3600 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
3601 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
3602 fHandledEvents &= vbgdGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
3603 if (fHandledEvents)
3604 {
3605 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
3606 fEvents &= ~pWait->fResEvents;
3607 RTListNodeRemove(&pWait->ListNode);
3608#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3609 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
3610#else
3611 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
3612 rc |= RTSemEventMultiSignal(pWait->Event);
3613#endif
3614 if (!fEvents)
3615 break;
3616 }
3617 }
3618 }
3619
3620 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
3621 return rc;
3622}
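/*
 * Illustrative sketch only (guarded by #if 0): vbgdDispatchEventsLocked must be
 * called with the event spinlock held.  A hypothetical caller injecting events
 * would look roughly like this, mirroring what the acquire-capabilities path
 * does elsewhere in this file; the function name is an example, not real code.
 */
#if 0
static void vbgdExampleInjectEvents(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    vbgdDispatchEventsLocked(pDevExt, fEvents);     /* queues or signals matching waiters */
    RTSpinlockRelease(pDevExt->EventSpinlock);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VbgdCommonWaitDoWakeUps(pDevExt);               /* do the actual wake-ups outside the lock */
# endif
}
#endif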
3623
3624
3625/**
3626 * Common interrupt service routine.
3627 *
3628 * This deals with events and with waking up threads waiting for those events.
3629 *
3630 * @returns true if it was our interrupt, false if it wasn't.
3631 * @param pDevExt The VBoxGuest device extension.
3632 */
3633bool VbgdCommonISR(PVBOXGUESTDEVEXT pDevExt)
3634{
3635 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
3636 bool fMousePositionChanged = false;
3637 int rc = 0;
3638 bool fOurIrq;
3639
3640 /*
3641 * Make sure we've initialized the device extension.
3642 */
3643 if (RT_UNLIKELY(!pReq))
3644 return false;
3645
3646 /*
3647 * Enter the spinlock and check if it's our IRQ or not.
3648 */
3649 RTSpinlockAcquire(pDevExt->EventSpinlock);
3650 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
3651 if (fOurIrq)
3652 {
3653 /*
3654         * Acknowledge events.
3655         * We don't use VbglGRPerform here as it may take other spinlocks.
3656 */
3657 pReq->header.rc = VERR_INTERNAL_ERROR;
3658 pReq->events = 0;
3659 ASMCompilerBarrier();
3660 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
3661 ASMCompilerBarrier(); /* paranoia */
3662 if (RT_SUCCESS(pReq->header.rc))
3663 {
3664 uint32_t fEvents = pReq->events;
3665 PVBOXGUESTWAIT pWait;
3666 PVBOXGUESTWAIT pSafe;
3667
3668 Log3(("VbgdCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
3669
3670 /*
3671 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
3672 */
3673 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
3674 {
3675 fMousePositionChanged = true;
3676 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
3677#ifndef RT_OS_WINDOWS
3678 if (pDevExt->MouseNotifyCallback.pfnNotify)
3679 pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
3680#endif
3681 }
3682
3683#ifdef VBOX_WITH_HGCM
3684 /*
3685 * The HGCM event/list is kind of different in that we evaluate all entries.
3686 */
3687 if (fEvents & VMMDEV_EVENT_HGCM)
3688 {
3689 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
3690 {
3691 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
3692 {
3693 pWait->fResEvents = VMMDEV_EVENT_HGCM;
3694 RTListNodeRemove(&pWait->ListNode);
3695# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3696 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
3697# else
3698 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
3699 rc |= RTSemEventMultiSignal(pWait->Event);
3700# endif
3701 }
3702 }
3703 fEvents &= ~VMMDEV_EVENT_HGCM;
3704 }
3705#endif
3706
3707 /*
3708 * Normal FIFO waiter evaluation.
3709 */
3710 rc |= vbgdDispatchEventsLocked(pDevExt, fEvents);
3711 }
3712        else /* something is seriously wrong... */
3713 Log(("VbgdCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
3714 pReq->header.rc, pReq->events));
3715 }
3716 else
3717 Log3(("VbgdCommonISR: not ours\n"));
3718
3719 RTSpinlockRelease(pDevExt->EventSpinlock);
3720
3721#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
3722 /*
3723 * Do wake-ups.
3724     * Note! On Windows this isn't possible at this IRQL, so a DPC takes care of it.
3725     *       On Darwin it is likewise deferred to the work loop callback.
3726 */
3727 VbgdCommonWaitDoWakeUps(pDevExt);
3728#endif
3729
3730 /*
3731     * Work the poll and async notification queues on OSes that implement them.
3732 * (Do this outside the spinlock to prevent some recursive spinlocking.)
3733 */
3734 if (fMousePositionChanged)
3735 {
3736 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
3737 VbgdNativeISRMousePollEvent(pDevExt);
3738 }
3739
3740 Assert(rc == 0);
3741 NOREF(rc);
3742 return fOurIrq;
3743}
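/*
 * Illustrative sketch only (guarded by #if 0): a hypothetical native interrupt
 * handler simply forwards to VbgdCommonISR and uses the return value to tell
 * the OS whether the (possibly shared) interrupt was ours.  The real handlers
 * live in the per-OS glue and follow that kernel's calling conventions.
 */
#if 0
static bool vbgdExampleNativeIrqHandler(void *pvUser)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    return VbgdCommonISR(pDevExt);
}
#endif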
3744