VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@54645

Last change on this file since 54645 was 54640, checked in by vboxsync, 10 years ago

warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 132.0 KB
1/* $Id: VBoxGuest.cpp 54640 2015-03-05 00:39:06Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
34#include <VBox/log.h>
35#include <iprt/mem.h>
36#include <iprt/time.h>
37#include <iprt/memobj.h>
38#include <iprt/asm.h>
39#include <iprt/asm-amd64-x86.h>
40#include <iprt/string.h>
41#include <iprt/process.h>
42#include <iprt/assert.h>
43#include <iprt/param.h>
44#include <iprt/timer.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Defined Constants And Macros *
64*******************************************************************************/
65#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
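/* Events in this mask are handled 'acquire style': vbdgCheckWaitEventCondition() additionally
   masks matches against vbgdGetAllowedEventMaskForSession() before delivering them to a waiter. */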
66
67
68/*******************************************************************************
69* Internal Functions *
70*******************************************************************************/
71#ifdef VBOX_WITH_HGCM
72static DECLCALLBACK(int) vbgdHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
73#endif
74static int vbgdIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
75static void vbgdBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
76static uint32_t vbgdGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
77static int vbgdResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
78static int vbgdResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
79static int vbgdResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
80static int vbgdSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
81 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
82static int vbgdSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
83 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
84static int vbgdSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
85 uint32_t fOrMask, uint32_t fNoMask, bool fSessionTermination);
86static int vbgdAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask,
87 uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags, bool fSessionTermination);
88static int vbgdDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
89
90
91/*******************************************************************************
92* Global Variables *
93*******************************************************************************/
94static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
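/* g_cbChangeMemBalloonReq is the size of a VMMDevChangeMemBalloon request large enough to carry
   the physical addresses of all VMMDEV_MEMORY_BALLOON_CHUNK_PAGES pages making up one chunk. */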
95
96#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
97/**
98 * Drag in the rest of IPRT since we share it with the
99 * rest of the kernel modules on Solaris.
100 */
101PFNRT g_apfnVBoxGuestIPRTDeps[] =
102{
103 /* VirtioNet */
104 (PFNRT)RTRandBytes,
105 /* RTSemMutex* */
106 (PFNRT)RTSemMutexCreate,
107 (PFNRT)RTSemMutexDestroy,
108 (PFNRT)RTSemMutexRequest,
109 (PFNRT)RTSemMutexRequestNoResume,
110 (PFNRT)RTSemMutexRequestDebug,
111 (PFNRT)RTSemMutexRequestNoResumeDebug,
112 (PFNRT)RTSemMutexRelease,
113 (PFNRT)RTSemMutexIsOwned,
114 NULL
115};
116#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
117
118
119/**
120 * Reserves memory in which the VMM can relocate any guest mappings
121 * that are floating around.
122 *
123 * This operation is a little bit tricky since the VMM might not accept
124 * just any address because of address clashes between the three contexts
125 * it operates in, so use a small stack to perform this operation.
126 *
127 * @returns VBox status code (ignored).
128 * @param pDevExt The device extension.
129 */
130static int vbgdInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
131{
132 /*
133 * Query the required space.
134 */
135 VMMDevReqHypervisorInfo *pReq;
136 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
137 if (RT_FAILURE(rc))
138 return rc;
139 pReq->hypervisorStart = 0;
140 pReq->hypervisorSize = 0;
141 rc = VbglGRPerform(&pReq->header);
142 if (RT_FAILURE(rc)) /* this shouldn't happen! */
143 {
144 VbglGRFree(&pReq->header);
145 return rc;
146 }
147
148 /*
149 * The VMM will report back if there is nothing it wants to map, like for
150 * instance in VT-x and AMD-V mode.
151 */
152 if (pReq->hypervisorSize == 0)
153 Log(("vbgdInitFixateGuestMappings: nothing to do\n"));
154 else
155 {
156 /*
157 * We have to try several times since the host can be picky
158 * about certain addresses.
159 */
160 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
161 uint32_t cbHypervisor = pReq->hypervisorSize;
162 RTR0MEMOBJ ahTries[5];
163 uint32_t iTry;
164 bool fBitched = false;
165 Log(("vbgdInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
166 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
167 {
168 /*
169 * Reserve space, or if that isn't supported, create an object for
170 * some fictive physical memory and map that into kernel space.
171 *
172 * To make the code a bit uglier, most systems cannot help with
173 * 4MB alignment, so we have to deal with that in addition to
174 * having two ways of getting the memory.
175 */
176 uint32_t uAlignment = _4M;
177 RTR0MEMOBJ hObj;
178 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
179 if (rc == VERR_NOT_SUPPORTED)
180 {
181 uAlignment = PAGE_SIZE;
182 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
183 }
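 /* The page-aligned fallback reserves an extra 4MB so that the start address reported
    to the host can still be rounded up to a 4MB boundary further down. */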
184 /*
185 * If both RTR0MemObjReserveKernel calls above failed because reservation is either not supported or
186 * not implemented at all on the current platform, try to map the memory object into the
187 * virtual kernel space.
188 */
189 if (rc == VERR_NOT_SUPPORTED)
190 {
191 if (hFictive == NIL_RTR0MEMOBJ)
192 {
193 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
194 if (RT_FAILURE(rc))
195 break;
196 hFictive = hObj;
197 }
198 uAlignment = _4M;
199 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
200 if (rc == VERR_NOT_SUPPORTED)
201 {
202 uAlignment = PAGE_SIZE;
203 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
204 }
205 }
206 if (RT_FAILURE(rc))
207 {
208 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
209 rc, cbHypervisor, uAlignment, iTry));
210 fBitched = true;
211 break;
212 }
213
214 /*
215 * Try set it.
216 */
217 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
218 pReq->header.rc = VERR_INTERNAL_ERROR;
219 pReq->hypervisorSize = cbHypervisor;
220 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
221 if ( uAlignment == PAGE_SIZE
222 && pReq->hypervisorStart & (_4M - 1))
223 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
224 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
225
226 rc = VbglGRPerform(&pReq->header);
227 if (RT_SUCCESS(rc))
228 {
229 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
230 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
231 RTR0MemObjAddress(pDevExt->hGuestMappings),
232 RTR0MemObjSize(pDevExt->hGuestMappings),
233 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
234 break;
235 }
236 ahTries[iTry] = hObj;
237 }
238
239 /*
240 * Cleanup failed attempts.
241 */
242 while (iTry-- > 0)
243 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
244 if ( RT_FAILURE(rc)
245 && hFictive != NIL_RTR0PTR)
246 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
247 if (RT_FAILURE(rc) && !fBitched)
248 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
249 }
250 VbglGRFree(&pReq->header);
251
252 /*
253 * We ignore failed attempts for now.
254 */
255 return VINF_SUCCESS;
256}
257
258
259/**
260 * Undo what vbgdInitFixateGuestMappings did.
261 *
262 * @param pDevExt The device extension.
263 */
264static void vbgdTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
265{
266 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
267 {
268 /*
269 * Tell the host that we're going to free the memory we reserved for
270 * it, then free it up. (Leak the memory if anything goes wrong here.)
271 */
272 VMMDevReqHypervisorInfo *pReq;
273 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
274 if (RT_SUCCESS(rc))
275 {
276 pReq->hypervisorStart = 0;
277 pReq->hypervisorSize = 0;
278 rc = VbglGRPerform(&pReq->header);
279 VbglGRFree(&pReq->header);
280 }
281 if (RT_SUCCESS(rc))
282 {
283 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
284 AssertRC(rc);
285 }
286 else
287 LogRel(("vbgdTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
288
289 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
290 }
291}
292
293
294
295/**
296 * Report the guest information to the host.
297 *
298 * @returns IPRT status code.
299 * @param enmOSType The OS type to report.
300 */
301static int vbgdReportGuestInfo(VBOXOSTYPE enmOSType)
302{
303 /*
304 * Allocate and fill in the two guest info reports.
305 */
306 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
307 VMMDevReportGuestInfo *pReqInfo1 = NULL;
308 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
309 Log(("vbgdReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
310 if (RT_SUCCESS(rc))
311 {
312 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
313 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
314 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
315 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
316 pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
317 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
318
319 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
320 Log(("vbgdReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
321 if (RT_SUCCESS(rc))
322 {
323 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
324 pReqInfo1->guestInfo.osType = enmOSType;
325
326 /*
327 * There are two protocols here:
328 * 1. Info2 + Info1. Supported by >=3.2.51.
329 * 2. Info1 and optionally Info2. The old protocol.
330 *
331 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
332 * if not supported by the VMMDev (message ordering requirement).
333 */
334 rc = VbglGRPerform(&pReqInfo2->header);
335 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
336 if (RT_SUCCESS(rc))
337 {
338 rc = VbglGRPerform(&pReqInfo1->header);
339 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
340 }
341 else if ( rc == VERR_NOT_SUPPORTED
342 || rc == VERR_NOT_IMPLEMENTED)
343 {
344 rc = VbglGRPerform(&pReqInfo1->header);
345 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
346 if (RT_SUCCESS(rc))
347 {
348 rc = VbglGRPerform(&pReqInfo2->header);
349 Log(("vbgdReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
350 if (rc == VERR_NOT_IMPLEMENTED)
351 rc = VINF_SUCCESS;
352 }
353 }
354 VbglGRFree(&pReqInfo1->header);
355 }
356 VbglGRFree(&pReqInfo2->header);
357 }
358
359 return rc;
360}
361
362
363/**
364 * Report the guest driver status to the host.
365 *
366 * @returns IPRT status code.
367 * @param fActive Flag whether the driver is now active or not.
368 */
369static int vbgdReportDriverStatus(bool fActive)
370{
371 /*
372 * Report guest status of the VBox driver to the host.
373 */
374 VMMDevReportGuestStatus *pReq2 = NULL;
375 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
376 Log(("vbgdReportDriverStatus: VbglGRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
377 if (RT_SUCCESS(rc))
378 {
379 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
380 pReq2->guestStatus.status = fActive ?
381 VBoxGuestFacilityStatus_Active
382 : VBoxGuestFacilityStatus_Inactive;
383 pReq2->guestStatus.flags = 0;
384 rc = VbglGRPerform(&pReq2->header);
385 Log(("vbgdReportDriverStatus: VbglGRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
386 fActive ? 1 : 0, rc));
387 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
388 rc = VINF_SUCCESS;
389 VbglGRFree(&pReq2->header);
390 }
391
392 return rc;
393}
394
395
396/** @name Memory Ballooning
397 * @{
398 */
399
400/**
401 * Inflate the balloon by one chunk represented by an R0 memory object.
402 *
403 * The caller owns the balloon mutex.
404 *
405 * @returns IPRT status code.
406 * @param pMemObj Pointer to the R0 memory object.
407 * @param pReq The pre-allocated request for performing the VMMDev call.
408 */
409static int vbgdBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
410{
411 uint32_t iPage;
412 int rc;
413
414 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
415 {
416 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
417 pReq->aPhysPage[iPage] = phys;
418 }
419
420 pReq->fInflate = true;
421 pReq->header.size = g_cbChangeMemBalloonReq;
422 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
423
424 rc = VbglGRPerform(&pReq->header);
425 if (RT_FAILURE(rc))
426 LogRel(("vbgdBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
427 return rc;
428}
429
430
431/**
432 * Deflate the balloon by one chunk - inform the host and free the memory object.
433 *
434 * The caller owns the balloon mutex.
435 *
436 * @returns IPRT status code.
437 * @param pMemObj Pointer to the R0 memory object.
438 * The memory object will be freed afterwards.
439 * @param pReq The pre-allocated request for performing the VMMDev call.
440 */
441static int vbgdBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
442{
443 uint32_t iPage;
444 int rc;
445
446 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
447 {
448 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
449 pReq->aPhysPage[iPage] = phys;
450 }
451
452 pReq->fInflate = false;
453 pReq->header.size = g_cbChangeMemBalloonReq;
454 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
455
456 rc = VbglGRPerform(&pReq->header);
457 if (RT_FAILURE(rc))
458 {
459 LogRel(("vbgdBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
460 return rc;
461 }
462
463 rc = RTR0MemObjFree(*pMemObj, true);
464 if (RT_FAILURE(rc))
465 {
466 LogRel(("vbgdBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
467 return rc;
468 }
469
470 *pMemObj = NIL_RTR0MEMOBJ;
471 return VINF_SUCCESS;
472}
473
474
475/**
476 * Inflate/deflate the memory balloon and notify the host.
477 *
478 * This is a worker used by vbgdIoCtl_CheckMemoryBalloon - it takes the mutex.
479 *
480 * @returns VBox status code.
481 * @param pDevExt The device extension.
483 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
484 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
485 * (VINF_SUCCESS if set).
486 */
487static int vbgdSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
488{
489 int rc = VINF_SUCCESS;
490
491 if (pDevExt->MemBalloon.fUseKernelAPI)
492 {
493 VMMDevChangeMemBalloon *pReq;
494 uint32_t i;
495
496 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
497 {
498 LogRel(("vbgdSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
499 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
500 return VERR_INVALID_PARAMETER;
501 }
502
503 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
504 return VINF_SUCCESS; /* nothing to do */
505
506 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
507 && !pDevExt->MemBalloon.paMemObj)
508 {
509 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
510 if (!pDevExt->MemBalloon.paMemObj)
511 {
512 LogRel(("vbgdSetBalloonSizeKernel: no memory for paMemObj!\n"));
513 return VERR_NO_MEMORY;
514 }
515 }
516
517 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
518 if (RT_FAILURE(rc))
519 return rc;
520
521 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
522 {
523 /* inflate */
524 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
525 {
526 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
527 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
528 if (RT_FAILURE(rc))
529 {
530 if (rc == VERR_NOT_SUPPORTED)
531 {
532 /* not supported -- fall back to the R3-allocated memory. */
533 rc = VINF_SUCCESS;
534 pDevExt->MemBalloon.fUseKernelAPI = false;
535 Assert(pDevExt->MemBalloon.cChunks == 0);
536 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
537 }
538 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
539 * cannot allocate more memory => don't try further, just stop here */
540 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
541 break;
542 }
543
544 rc = vbgdBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
545 if (RT_FAILURE(rc))
546 {
547 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
548 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
549 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
550 break;
551 }
552 pDevExt->MemBalloon.cChunks++;
553 }
554 }
555 else
556 {
557 /* deflate */
558 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
559 {
560 rc = vbgdBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
561 if (RT_FAILURE(rc))
562 {
563 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
564 break;
565 }
566 pDevExt->MemBalloon.cChunks--;
567 }
568 }
569
570 VbglGRFree(&pReq->header);
571 }
572
573 /*
574 * Set the handle-in-ring3 indicator. When set, Ring-3 will have to perform
575 * the balloon changes via the other API.
576 */
577 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
578
579 return rc;
580}
581
582
583/**
584 * Inflate/deflate the balloon by one chunk.
585 *
586 * Worker for vbgdIoCtl_ChangeMemoryBalloon - it takes the mutex.
587 *
588 * @returns VBox status code.
589 * @param pDevExt The device extension.
590 * @param pSession The session.
591 * @param u64ChunkAddr The address of the chunk to add to / remove from the
592 * balloon.
593 * @param fInflate Inflate if true, deflate if false.
594 */
595static int vbgdSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint64_t u64ChunkAddr, bool fInflate)
596{
597 VMMDevChangeMemBalloon *pReq;
598 int rc = VINF_SUCCESS;
599 uint32_t i;
600 PRTR0MEMOBJ pMemObj = NULL;
601
602 if (fInflate)
603 {
604 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
605 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
606 {
607 LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
608 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
609 return VERR_INVALID_PARAMETER;
610 }
611
612 if (!pDevExt->MemBalloon.paMemObj)
613 {
614 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
615 if (!pDevExt->MemBalloon.paMemObj)
616 {
617 LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
618 return VERR_NO_MEMORY;
619 }
620 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
621 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
622 }
623 }
624 else
625 {
626 if (pDevExt->MemBalloon.cChunks == 0)
627 {
628 AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
629 return VERR_INVALID_PARAMETER;
630 }
631 }
632
633 /*
634 * Enumerate all memory objects and check if the object is already registered.
635 */
636 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
637 {
638 if ( fInflate
639 && !pMemObj
640 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
641 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
642 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
643 {
644 if (fInflate)
645 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
646 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
647 break;
648 }
649 }
650 if (!pMemObj)
651 {
652 if (fInflate)
653 {
654 /* no free object pointer found -- should not happen */
655 return VERR_NO_MEMORY;
656 }
657
658 /* cannot free this memory as it wasn't provided before */
659 return VERR_NOT_FOUND;
660 }
661
662 /*
663 * Try to inflate / deflate the balloon as requested.
664 */
665 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
666 if (RT_FAILURE(rc))
667 return rc;
668
669 if (fInflate)
670 {
671 rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
672 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
673 if (RT_SUCCESS(rc))
674 {
675 rc = vbgdBalloonInflate(pMemObj, pReq);
676 if (RT_SUCCESS(rc))
677 pDevExt->MemBalloon.cChunks++;
678 else
679 {
680 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
681 RTR0MemObjFree(*pMemObj, true);
682 *pMemObj = NIL_RTR0MEMOBJ;
683 }
684 }
685 }
686 else
687 {
688 rc = vbgdBalloonDeflate(pMemObj, pReq);
689 if (RT_SUCCESS(rc))
690 pDevExt->MemBalloon.cChunks--;
691 else
692 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
693 }
694
695 VbglGRFree(&pReq->header);
696 return rc;
697}
698
699
700/**
701 * Cleanup the memory balloon of a session.
702 *
703 * Will request the balloon mutex, so it must be valid and the caller must not
704 * own it already.
705 *
706 * @param pDevExt The device extension.
707 * @param pSession The session. Can be NULL at unload.
708 */
709static void vbgdCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
710{
711 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
712 if ( pDevExt->MemBalloon.pOwner == pSession
713 || pSession == NULL /*unload*/)
714 {
715 if (pDevExt->MemBalloon.paMemObj)
716 {
717 VMMDevChangeMemBalloon *pReq;
718 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
719 if (RT_SUCCESS(rc))
720 {
721 uint32_t i;
722 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
723 {
724 rc = vbgdBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
725 if (RT_FAILURE(rc))
726 {
727 LogRel(("vbgdCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
728 rc, pDevExt->MemBalloon.cChunks));
729 break;
730 }
731 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
732 pDevExt->MemBalloon.cChunks--;
733 }
734 VbglGRFree(&pReq->header);
735 }
736 else
737 LogRel(("vbgdCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
738 rc, pDevExt->MemBalloon.cChunks));
739 RTMemFree(pDevExt->MemBalloon.paMemObj);
740 pDevExt->MemBalloon.paMemObj = NULL;
741 }
742
743 pDevExt->MemBalloon.pOwner = NULL;
744 }
745 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
746}
747
748/** @} */
749
750
751
752/** @name Heartbeat
753 * @{
754 */
755
756/**
757 * Sends heartbeat to host.
758 *
759 * @returns VBox status code.
760 */
761static int vbgdHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
762{
763 int rc;
764 if (pDevExt->pReqGuestHeartbeat)
765 {
766 rc = VbglGRPerform(pDevExt->pReqGuestHeartbeat);
767 Log(("vbgdHeartbeatSend: VbglGRPerform vbgdHeartbeatSend completed with rc=%Rrc\n", rc));
768 }
769 else
770 rc = VERR_INVALID_STATE;
771 return rc;
772}
773
774
775/**
776 * Callback for heartbeat timer.
777 */
778static DECLCALLBACK(void) vbgdHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
779{
780 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
781 int rc;
782 AssertReturnVoid(pDevExt);
783
784 rc = vbgdHeartbeatSend(pDevExt);
785 if (RT_FAILURE(rc))
786 Log(("HB Timer: vbgdHeartbeatSend failed: rc=%Rrc\n", rc));
787
788 NOREF(hTimer); NOREF(iTick);
789}
790
791
792/**
793 * Configure the host to check the guest's heartbeat
794 * and get the heartbeat interval from the host.
795 *
796 * @returns VBox status code.
797 * @param pDevExt The device extension.
798 * @param fEnabled Set true to enable guest heartbeat checks on host.
799 */
800static int vbgdHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
801{
802 VMMDevReqHeartbeat *pReq;
803 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
804 Log(("vbgdHeartbeatHostConfigure: VbglGRAlloc vbgdHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
805 if (RT_SUCCESS(rc))
806 {
807 pReq->fEnabled = fEnabled;
808 pReq->cNsInterval = 0;
809 rc = VbglGRPerform(&pReq->header);
810 Log(("vbgdHeartbeatHostConfigure: VbglGRPerform vbgdHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
811 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
812 VbglGRFree(&pReq->header);
813 }
814 return rc;
815}
816
817
818/**
819 * Initializes the heartbeat timer.
820 *
821 * This feature may be disabled by the host.
822 *
823 * @returns VBox status (ignored).
824 * @param pDevExt The device extension.
825 */
826static int vbgdHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
827{
828 /*
829 * Make sure that heartbeat checking is disabled.
830 */
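 /* Heartbeat checking is only (re-)enabled below if the host accepts this request; the
    interval the host reports back is stored by vbgdHeartbeatHostConfigure(). */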
831 int rc = vbgdHeartbeatHostConfigure(pDevExt, false);
832 if (RT_SUCCESS(rc))
833 {
834 rc = vbgdHeartbeatHostConfigure(pDevExt, true);
835 if (RT_SUCCESS(rc))
836 {
837 /*
838 * Preallocate the request to use it from the timer callback because:
839 * 1) on Windows VbglGRAlloc must be called at IRQL <= APC_LEVEL
840 * and the timer callback runs at DISPATCH_LEVEL;
841 * 2) avoid repeated allocations.
842 */
843 rc = VbglGRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
844 if (RT_SUCCESS(rc))
845 {
846 LogRel(("VbgdCommonInitDevExt: Setting up heartbeat to trigger every %RU64 sec\n",
847 pDevExt->cNsHeartbeatInterval / RT_NS_1SEC));
848 rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
849 (PFNRTTIMER)vbgdHeartbeatTimerHandler, pDevExt);
850 if (RT_SUCCESS(rc))
851 {
852 rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
853 if (RT_SUCCESS(rc))
854 return VINF_SUCCESS;
855
856 LogRel(("VbgdCommonInitDevExt: Heartbeat timer failed to start, rc=%Rrc\n", rc));
857 }
858 else
859 LogRel(("VbgdCommonInitDevExt: Failed to create heartbeat timer: %Rrc\n", rc));
860
861 VbglGRFree(pDevExt->pReqGuestHeartbeat);
862 pDevExt->pReqGuestHeartbeat = NULL;
863 }
864 else
865 LogRel(("VbgdCommonInitDevExt: VbglGRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));
866
867 LogRel(("VbgdCommonInitDevExt: Failed to set up the timer, guest heartbeat is disabled\n"));
868 vbgdHeartbeatHostConfigure(pDevExt, false);
869 }
870 else
871 LogRel(("VbgdCommonInitDevExt: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
872 }
873 return rc;
874}
875
876/** @} */
877
878
879/**
880 * Helper to reinit the VMMDev communication after hibernation.
881 *
882 * @returns VBox status code.
883 * @param pDevExt The device extension.
884 * @param enmOSType The OS type.
885 *
886 * @todo Call this on all platforms, not just windows.
887 */
888int VbgdCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
889{
890 int rc = vbgdReportGuestInfo(enmOSType);
891 if (RT_SUCCESS(rc))
892 {
893 rc = vbgdReportDriverStatus(true /* Driver is active */);
894 if (RT_FAILURE(rc))
895 Log(("VbgdCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
896 }
897 else
898 Log(("VbgdCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
899 LogFlow(("VbgdCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
900 return rc;
901}
902
903
904/**
905 * Initializes the VBoxGuest device extension when the
906 * device driver is loaded.
907 *
908 * The native code locates the VMMDev on the PCI bus and retrieves
909 * the MMIO and I/O port ranges; this function will take care of
910 * mapping the MMIO memory (if present). Upon successful return
911 * the native code should set up the interrupt handler.
912 *
913 * @returns VBox status code.
914 *
915 * @param pDevExt The device extension. Allocated by the native code.
916 * @param IOPortBase The base of the I/O port range.
917 * @param pvMMIOBase The base of the MMIO memory mapping.
918 * This is optional, pass NULL if not present.
919 * @param cbMMIO The size of the MMIO memory mapping.
920 * This is optional, pass 0 if not present.
921 * @param enmOSType The guest OS type to report to the VMMDev.
922 * @param fFixedEvents Events that will be enabled upon init and no client
923 * will ever be allowed to mask.
924 */
925int VbgdCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
926 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
927{
928 int rc, rc2;
929
930#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
931 /*
932 * Create the release log.
933 */
934 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
935 PRTLOGGER pRelLogger;
936 rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
937 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
938 if (RT_SUCCESS(rc))
939 RTLogRelSetDefaultInstance(pRelLogger);
940 /** @todo Add native hook for getting logger config parameters and setting
941 * them. On linux we should use the module parameter stuff... */
942#endif
943
944 /*
945 * Adjust fFixedEvents.
946 */
947#ifdef VBOX_WITH_HGCM
948 fFixedEvents |= VMMDEV_EVENT_HGCM;
949#endif
950
951 /*
952 * Initialize the data.
953 */
954 pDevExt->IOPortBase = IOPortBase;
955 pDevExt->pVMMDevMemory = NULL;
956 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
957 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
958 pDevExt->pIrqAckEvents = NULL;
959 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
960 RTListInit(&pDevExt->WaitList);
961#ifdef VBOX_WITH_HGCM
962 RTListInit(&pDevExt->HGCMWaitList);
963#endif
964#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
965 RTListInit(&pDevExt->WakeUpList);
966#endif
967 RTListInit(&pDevExt->WokenUpList);
968 RTListInit(&pDevExt->FreeList);
969 RTListInit(&pDevExt->SessionList);
970 pDevExt->cSessions = 0;
971 pDevExt->fLoggingEnabled = false;
972 pDevExt->f32PendingEvents = 0;
973 pDevExt->u32MousePosChangedSeq = 0;
974 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
975 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
976 pDevExt->MemBalloon.cChunks = 0;
977 pDevExt->MemBalloon.cMaxChunks = 0;
978 pDevExt->MemBalloon.fUseKernelAPI = true;
979 pDevExt->MemBalloon.paMemObj = NULL;
980 pDevExt->MemBalloon.pOwner = NULL;
981 pDevExt->MouseNotifyCallback.pfnNotify = NULL;
982 pDevExt->MouseNotifyCallback.pvUser = NULL;
983 pDevExt->pReqGuestHeartbeat = NULL;
984
985 pDevExt->fFixedEvents = fFixedEvents;
986 vbgdBitUsageTrackerClear(&pDevExt->EventFilterTracker);
987 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
988
989 vbgdBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
990 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
991
992 pDevExt->fAcquireModeGuestCaps = 0;
993 pDevExt->fSetModeGuestCaps = 0;
994 pDevExt->fAcquiredGuestCaps = 0;
995 vbgdBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
996 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
997
998 /*
999 * If there is an MMIO region validate the version and size.
1000 */
1001 if (pvMMIOBase)
1002 {
1003 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1004 Assert(cbMMIO);
1005 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1006 && pVMMDev->u32Size >= 32
1007 && pVMMDev->u32Size <= cbMMIO)
1008 {
1009 pDevExt->pVMMDevMemory = pVMMDev;
1010 Log(("VbgdCommonInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1011 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1012 }
1013 else /* try live without it. */
1014 LogRel(("VbgdCommonInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1015 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1016 }
1017
1018 /*
1019 * Create the wait and session spinlocks as well as the ballooning mutex.
1020 */
1021 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1022 if (RT_SUCCESS(rc))
1023 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1024 if (RT_FAILURE(rc))
1025 {
1026 LogRel(("VbgdCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1027 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
1028 RTSpinlockDestroy(pDevExt->EventSpinlock);
1029 return rc;
1030 }
1031
1032 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1033 if (RT_FAILURE(rc))
1034 {
1035 LogRel(("VbgdCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1036 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1037 RTSpinlockDestroy(pDevExt->EventSpinlock);
1038 return rc;
1039 }
1040
1041 /*
1042 * Initialize the guest library and report the guest info back to VMMDev,
1043 * set the interrupt control filter mask, and fixate the guest mappings
1044 * made by the VMM.
1045 */
1046 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
1047 if (RT_SUCCESS(rc))
1048 {
1049 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1050 if (RT_SUCCESS(rc))
1051 {
1052 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
1053 Assert(pDevExt->PhysIrqAckEvents != 0);
1054
1055 rc = vbgdReportGuestInfo(enmOSType);
1056 if (RT_SUCCESS(rc))
1057 {
1058 /*
1059 * Set the fixed event and make sure the host doesn't have any lingering
1060 * guest capabilities or mouse status bits set.
1061 */
1062 rc = vbgdResetEventFilterOnHost(pDevExt, pDevExt->fFixedEvents);
1063 if (RT_SUCCESS(rc))
1064 {
1065 rc = vbgdResetCapabilitiesOnHost(pDevExt);
1066 if (RT_SUCCESS(rc))
1067 {
1068 rc = vbgdResetMouseStatusOnHost(pDevExt);
1069 if (RT_SUCCESS(rc))
1070 {
1071 /*
1072 * Initialize stuff which may fail without requiring the driver init to fail.
1073 */
1074 vbgdInitFixateGuestMappings(pDevExt);
1075 vbgdHeartbeatInit(pDevExt);
1076
1077 /*
1078 * Done!
1079 */
1080 rc = vbgdReportDriverStatus(true /* Driver is active */);
1081 if (RT_FAILURE(rc))
1082 LogRel(("VbgdCommonInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1083
1084 LogFlowFunc(("VbgdCommonInitDevExt: returns success\n"));
1085 return VINF_SUCCESS;
1086 }
1087 LogRel(("VbgdCommonInitDevExt: failed to clear mouse status: rc=%Rrc\n", rc));
1088 }
1089 else
1090 LogRel(("VbgdCommonInitDevExt: failed to clear guest capabilities: rc=%Rrc\n", rc));
1091 }
1092 else
1093 LogRel(("VbgdCommonInitDevExt: failed to set fixed event filter: rc=%Rrc\n", rc));
1094 }
1095 else
1096 LogRel(("VbgdCommonInitDevExt: VBoxReportGuestInfo failed: rc=%Rrc\n", rc));
1097 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1098 }
1099 else
1100 LogRel(("VbgdCommonInitDevExt: VBoxGRAlloc failed: rc=%Rrc\n", rc));
1101
1102 VbglTerminate();
1103 }
1104 else
1105 LogRel(("VbgdCommonInitDevExt: VbglInit failed: rc=%Rrc\n", rc));
1106
1107 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1108 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1109 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1110
1111#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1112 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1113 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1114#endif
1115 return rc; /* (failed) */
1116}
1117
1118
1119/**
1120 * Deletes all the items in a wait chain.
1121 * @param pList The head of the chain.
1122 */
1123static void vbgdDeleteWaitList(PRTLISTNODE pList)
1124{
1125 while (!RTListIsEmpty(pList))
1126 {
1127 int rc2;
1128 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1129 RTListNodeRemove(&pWait->ListNode);
1130
1131 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1132 pWait->Event = NIL_RTSEMEVENTMULTI;
1133 pWait->pSession = NULL;
1134 RTMemFree(pWait);
1135 }
1136}
1137
1138
1139/**
1140 * Destroys the VBoxGuest device extension.
1141 *
1142 * The native code should call this before the driver is unloaded,
1143 * but don't call this on shutdown.
1144 *
1145 * @param pDevExt The device extension.
1146 */
1147void VbgdCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1148{
1149 int rc2;
1150 Log(("VbgdCommonDeleteDevExt:\n"));
1151 Log(("VBoxGuest: The additions driver is terminating.\n"));
1152
1153 /*
1154 * Stop and destroy HB timer and
1155 * disable host heartbeat checking.
1156 */
1157 if (pDevExt->pHeartbeatTimer)
1158 {
1159 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1160 vbgdHeartbeatHostConfigure(pDevExt, false);
1161 }
1162
1163 VbglGRFree(pDevExt->pReqGuestHeartbeat);
1164 pDevExt->pReqGuestHeartbeat = NULL;
1165
1166 /*
1167 * Clean up the bits that involves the host first.
1168 */
1169 vbgdTermUnfixGuestMappings(pDevExt);
1170 if (!RTListIsEmpty(&pDevExt->SessionList))
1171 {
1172 LogRelFunc(("session list not empty!\n"));
1173 RTListInit(&pDevExt->SessionList);
1174 }
1175 /* Update the host flags (mouse status etc) not to reflect this session. */
1176 pDevExt->fFixedEvents = 0;
1177 vbgdResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1178 vbgdResetCapabilitiesOnHost(pDevExt);
1179 vbgdResetMouseStatusOnHost(pDevExt);
1180
1181 vbgdCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1182
1183 /*
1184 * Cleanup all the other resources.
1185 */
1186 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1187 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1188 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1189
1190 vbgdDeleteWaitList(&pDevExt->WaitList);
1191#ifdef VBOX_WITH_HGCM
1192 vbgdDeleteWaitList(&pDevExt->HGCMWaitList);
1193#endif
1194#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1195 vbgdDeleteWaitList(&pDevExt->WakeUpList);
1196#endif
1197 vbgdDeleteWaitList(&pDevExt->WokenUpList);
1198 vbgdDeleteWaitList(&pDevExt->FreeList);
1199
1200 VbglTerminate();
1201
1202 pDevExt->pVMMDevMemory = NULL;
1203
1204 pDevExt->IOPortBase = 0;
1205 pDevExt->pIrqAckEvents = NULL;
1206
1207#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1208 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1209 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1210#endif
1211
1212}
1213
1214
1215/**
1216 * Creates a VBoxGuest user session.
1217 *
1218 * The native code calls this when a ring-3 client opens the device.
1219 * Use VbgdCommonCreateKernelSession when a ring-0 client connects.
1220 *
1221 * @returns VBox status code.
1222 * @param pDevExt The device extension.
1223 * @param ppSession Where to store the session on success.
1224 */
1225int VbgdCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1226{
1227 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1228 if (RT_UNLIKELY(!pSession))
1229 {
1230 LogRel(("VbgdCommonCreateUserSession: no memory!\n"));
1231 return VERR_NO_MEMORY;
1232 }
1233
1234 pSession->Process = RTProcSelf();
1235 pSession->R0Process = RTR0ProcHandleSelf();
1236 pSession->pDevExt = pDevExt;
1237 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1238 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1239 pDevExt->cSessions++;
1240 RTSpinlockRelease(pDevExt->SessionSpinlock);
1241
1242 *ppSession = pSession;
1243 LogFlow(("VbgdCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1244 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1245 return VINF_SUCCESS;
1246}
1247
1248
1249/**
1250 * Creates a VBoxGuest kernel session.
1251 *
1252 * The native code calls this when a ring-0 client connects to the device.
1253 * Use VbgdCommonCreateUserSession when a ring-3 client opens the device.
1254 *
1255 * @returns VBox status code.
1256 * @param pDevExt The device extension.
1257 * @param ppSession Where to store the session on success.
1258 */
1259int VbgdCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1260{
1261 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1262 if (RT_UNLIKELY(!pSession))
1263 {
1264 LogRel(("VbgdCommonCreateKernelSession: no memory!\n"));
1265 return VERR_NO_MEMORY;
1266 }
1267
1268 pSession->Process = NIL_RTPROCESS;
1269 pSession->R0Process = NIL_RTR0PROCESS;
1270 pSession->pDevExt = pDevExt;
1271 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1272 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1273 pDevExt->cSessions++;
1274 RTSpinlockRelease(pDevExt->SessionSpinlock);
1275
1276 *ppSession = pSession;
1277 LogFlow(("VbgdCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1278 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1279 return VINF_SUCCESS;
1280}
1281
1282
1283/**
1284 * Closes a VBoxGuest session.
1285 *
1286 * @param pDevExt The device extension.
1287 * @param pSession The session to close (and free).
1288 */
1289void VbgdCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1290{
1291#ifdef VBOX_WITH_HGCM
1292 unsigned i;
1293#endif
1294 LogFlow(("VbgdCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1295 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1296
1297 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1298 RTListNodeRemove(&pSession->ListNode);
1299 pDevExt->cSessions--;
1300 RTSpinlockRelease(pDevExt->SessionSpinlock);
1301 vbgdAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE,
1302 true /*fSessionTermination*/);
1303 vbgdSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1304 vbgdSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1305 vbgdSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1306
1307 vbgdIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1308
1309#ifdef VBOX_WITH_HGCM
1310 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1311 if (pSession->aHGCMClientIds[i])
1312 {
1313 VBoxGuestHGCMDisconnectInfo Info;
1314 Info.result = 0;
1315 Info.u32ClientID = pSession->aHGCMClientIds[i];
1316 pSession->aHGCMClientIds[i] = 0;
1317 Log(("VbgdCommonCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1318 VbglR0HGCMInternalDisconnect(&Info, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1319 }
1320#endif
1321
1322 pSession->pDevExt = NULL;
1323 pSession->Process = NIL_RTPROCESS;
1324 pSession->R0Process = NIL_RTR0PROCESS;
1325 vbgdCloseMemBalloon(pDevExt, pSession);
1326 RTMemFree(pSession);
1327}
1328
1329
1330/**
1331 * Allocates a wait-for-event entry.
1332 *
1333 * @returns The wait-for-event entry.
1334 * @param pDevExt The device extension.
1335 * @param pSession The session that's allocating this. Can be NULL.
1336 */
1337static PVBOXGUESTWAIT vbgdWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1338{
1339 /*
1340 * Allocate it one way or the other.
1341 */
1342 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1343 if (pWait)
1344 {
1345 RTSpinlockAcquire(pDevExt->EventSpinlock);
1346
1347 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1348 if (pWait)
1349 RTListNodeRemove(&pWait->ListNode);
1350
1351 RTSpinlockRelease(pDevExt->EventSpinlock);
1352 }
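 /* The free list is first peeked at without the lock so the spinlock is only taken when an
    entry looks available; the entry is re-fetched and unlinked under the lock above, so a
    NULL pWait here simply means a fresh entry has to be allocated. */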
1353 if (!pWait)
1354 {
1355 int rc;
1356
1357 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1358 if (!pWait)
1359 {
1360 LogRelMax(32, ("vbgdWaitAlloc: out-of-memory!\n"));
1361 return NULL;
1362 }
1363
1364 rc = RTSemEventMultiCreate(&pWait->Event);
1365 if (RT_FAILURE(rc))
1366 {
1367 LogRelMax(32, ("VbgdCommonIoCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1368 RTMemFree(pWait);
1369 return NULL;
1370 }
1371
1372 pWait->ListNode.pNext = NULL;
1373 pWait->ListNode.pPrev = NULL;
1374 }
1375
1376 /*
1377 * Zero members just as a precaution.
1378 */
1379 pWait->fReqEvents = 0;
1380 pWait->fResEvents = 0;
1381#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1382 pWait->fPendingWakeUp = false;
1383 pWait->fFreeMe = false;
1384#endif
1385 pWait->pSession = pSession;
1386#ifdef VBOX_WITH_HGCM
1387 pWait->pHGCMReq = NULL;
1388#endif
1389 RTSemEventMultiReset(pWait->Event);
1390 return pWait;
1391}
1392
1393
1394/**
1395 * Frees the wait-for-event entry.
1396 *
1397 * The caller must own the wait spinlock!
1398 * The entry must be in a list!
1399 *
1400 * @param pDevExt The device extension.
1401 * @param pWait The wait-for-event entry to free.
1402 */
1403static void vbgdWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1404{
1405 pWait->fReqEvents = 0;
1406 pWait->fResEvents = 0;
1407#ifdef VBOX_WITH_HGCM
1408 pWait->pHGCMReq = NULL;
1409#endif
1410#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1411 Assert(!pWait->fFreeMe);
1412 if (pWait->fPendingWakeUp)
1413 pWait->fFreeMe = true;
1414 else
1415#endif
1416 {
1417 RTListNodeRemove(&pWait->ListNode);
1418 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1419 }
1420}
1421
1422
1423/**
1424 * Frees the wait-for-event entry.
1425 *
1426 * @param pDevExt The device extension.
1427 * @param pWait The wait-for-event entry to free.
1428 */
1429static void vbgdWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1430{
1431 RTSpinlockAcquire(pDevExt->EventSpinlock);
1432 vbgdWaitFreeLocked(pDevExt, pWait);
1433 RTSpinlockRelease(pDevExt->EventSpinlock);
1434}
1435
1436
1437#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1438/**
1439 * Processes the wake-up list.
1440 *
1441 * All entries in the wake-up list get signalled and moved to the woken-up
1442 * list.
1443 *
1444 * @param pDevExt The device extension.
1445 */
1446void VbgdCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1447{
1448 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1449 {
1450 RTSpinlockAcquire(pDevExt->EventSpinlock);
1451 for (;;)
1452 {
1453 int rc;
1454 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1455 if (!pWait)
1456 break;
1457 pWait->fPendingWakeUp = true;
1458 RTSpinlockRelease(pDevExt->EventSpinlock);
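 /* fPendingWakeUp keeps vbgdWaitFreeLocked() from recycling this entry while the spinlock
    is dropped for the semaphore signalling; if the waiter tries to free it in the meantime,
    fFreeMe gets set and the entry is freed below instead. */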
1459
1460 rc = RTSemEventMultiSignal(pWait->Event);
1461 AssertRC(rc);
1462
1463 RTSpinlockAcquire(pDevExt->EventSpinlock);
1464 pWait->fPendingWakeUp = false;
1465 if (!pWait->fFreeMe)
1466 {
1467 RTListNodeRemove(&pWait->ListNode);
1468 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1469 }
1470 else
1471 {
1472 pWait->fFreeMe = false;
1473 vbgdWaitFreeLocked(pDevExt, pWait);
1474 }
1475 }
1476 RTSpinlockRelease(pDevExt->EventSpinlock);
1477 }
1478}
1479#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1480
1481
1482/**
1483 * Implements the fast (no input or output) type of IOCtls.
1484 *
1485 * This is currently just a placeholder stub inherited from the support driver code.
1486 *
1487 * @returns VBox status code.
1488 * @param iFunction The IOCtl function number.
1489 * @param pDevExt The device extension.
1490 * @param pSession The session.
1491 */
1492int VbgdCommonIoCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1493{
1494 LogFlow(("VbgdCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1495
1496 NOREF(iFunction);
1497 NOREF(pDevExt);
1498 NOREF(pSession);
1499 return VERR_NOT_SUPPORTED;
1500}
1501
1502
1503/**
1504 * Return the VMM device port.
1505 *
1506 * @returns IPRT status code.
1507 * @param pDevExt The device extension.
1508 * @param pInfo The request info.
1509 * @param pcbDataReturned (out) contains the number of bytes to return.
1510 */
1511static int vbgdIoCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1512{
1513 LogFlow(("VBOXGUEST_IOCTL_GETVMMDEVPORT\n"));
1514
1515 pInfo->portAddress = pDevExt->IOPortBase;
1516 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1517 if (pcbDataReturned)
1518 *pcbDataReturned = sizeof(*pInfo);
1519 return VINF_SUCCESS;
1520}
1521
1522
1523#ifndef RT_OS_WINDOWS
1524/**
1525 * Set the callback for the kernel mouse handler.
1526 *
1527 * @returns IPRT status code.
1528 * @param pDevExt The device extension.
1529 * @param pNotify The new callback information.
1530 */
1531int vbgdIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1532{
1533 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->pfnNotify, pNotify->pvUser));
1534
1535 RTSpinlockAcquire(pDevExt->EventSpinlock);
1536 pDevExt->MouseNotifyCallback = *pNotify;
1537 RTSpinlockRelease(pDevExt->EventSpinlock);
1538 return VINF_SUCCESS;
1539}
1540#endif
1541
1542
1543/**
1544 * Worker for vbgdIoCtl_WaitEvent.
1545 *
1546 * The caller enters the spinlock, we leave it.
1547 *
1548 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1549 */
1550DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1551 VBoxGuestWaitEventInfo *pInfo, int iEvent, const uint32_t fReqEvents)
1552{
1553 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1554 if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
1555 fMatches &= vbgdGetAllowedEventMaskForSession(pDevExt, pSession);
1556 if (fMatches || pSession->fPendingCancelWaitEvents)
1557 {
1558 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1559 RTSpinlockRelease(pDevExt->EventSpinlock);
1560
1561 pInfo->u32EventFlagsOut = fMatches;
1562 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1563 if (fReqEvents & ~((uint32_t)1 << iEvent))
1564 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1565 else
1566 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1567 pSession->fPendingCancelWaitEvents = false;
1568 return VINF_SUCCESS;
1569 }
1570
1571 RTSpinlockRelease(pDevExt->EventSpinlock);
1572 return VERR_TIMEOUT;
1573}
1574
1575
1576static int vbgdIoCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1577 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
1578{
1579 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
1580 uint32_t fResEvents;
1581 int iEvent;
1582 PVBOXGUESTWAIT pWait;
1583 int rc;
1584
1585 pInfo->u32EventFlagsOut = 0;
1586 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1587 if (pcbDataReturned)
1588 *pcbDataReturned = sizeof(*pInfo);
1589
1590 /*
1591 * Copy and verify the input mask.
1592 */
1593 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
1594 if (RT_UNLIKELY(iEvent < 0))
1595 {
1596 LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
1597 return VERR_INVALID_PARAMETER;
1598 }
1599
1600 /*
1601 * Check the condition up front, before doing the wait-for-event allocations.
1602 */
1603 RTSpinlockAcquire(pDevExt->EventSpinlock);
1604 rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
1605 if (rc == VINF_SUCCESS)
1606 return rc;
1607
1608 if (!pInfo->u32TimeoutIn)
1609 {
1610 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1611 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
1612 return VERR_TIMEOUT;
1613 }
1614
1615 pWait = vbgdWaitAlloc(pDevExt, pSession);
1616 if (!pWait)
1617 return VERR_NO_MEMORY;
1618 pWait->fReqEvents = fReqEvents;
1619
1620 /*
1621 * We've got the wait entry now, re-enter the spinlock and check for the condition.
1622 * If the wait condition is met, return.
1623 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
1624 */
1625 RTSpinlockAcquire(pDevExt->EventSpinlock);
1626 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
1627 rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
1628 if (rc == VINF_SUCCESS)
1629 {
1630 vbgdWaitFreeUnlocked(pDevExt, pWait);
1631 return rc;
1632 }
1633
1634 if (fInterruptible)
1635 rc = RTSemEventMultiWaitNoResume(pWait->Event,
1636 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1637 else
1638 rc = RTSemEventMultiWait(pWait->Event,
1639 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1640
1641 /*
1642 * There is one special case here and that's when the semaphore is
1643 * destroyed upon device driver unload. This shouldn't happen of course,
1644 * but in case it does, just get out of here ASAP.
1645 */
1646 if (rc == VERR_SEM_DESTROYED)
1647 return rc;
1648
1649 /*
1650 * Unlink the wait item and dispose of it.
1651 */
1652 RTSpinlockAcquire(pDevExt->EventSpinlock);
1653 fResEvents = pWait->fResEvents;
1654 vbgdWaitFreeLocked(pDevExt, pWait);
1655 RTSpinlockRelease(pDevExt->EventSpinlock);
1656
1657 /*
1658 * Now deal with the return code.
1659 */
1660 if ( fResEvents
1661 && fResEvents != UINT32_MAX)
1662 {
1663 pInfo->u32EventFlagsOut = fResEvents;
1664 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1665 if (fReqEvents & ~((uint32_t)1 << iEvent))
1666 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1667 else
1668 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1669 rc = VINF_SUCCESS;
1670 }
1671 else if ( fResEvents == UINT32_MAX
1672 || rc == VERR_INTERRUPTED)
1673 {
1674 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
1675 rc = VERR_INTERRUPTED;
1676 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
1677 }
1678 else if (rc == VERR_TIMEOUT)
1679 {
1680 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1681 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
1682 }
1683 else
1684 {
1685 if (RT_SUCCESS(rc))
1686 {
1687 LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
1688 rc = VERR_INTERNAL_ERROR;
1689 }
1690 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1691 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
1692 }
1693
1694 return rc;
1695}
1696
1697
1698static int vbgdIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1699{
1700 PVBOXGUESTWAIT pWait;
1701 PVBOXGUESTWAIT pSafe;
1702 int rc = 0;
1703 /* Was at least one WAITEVENT in process for this session? If not we
1704 * set a flag that the next call should be interrupted immediately. This
1705 * is needed so that a user thread can reliably interrupt another one in a
1706 * WAITEVENT loop. */
1707 bool fCancelledOne = false;
1708
1709 LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));
1710
1711 /*
1712 * Walk the event list and wake up anyone with a matching session.
1713 */
1714 RTSpinlockAcquire(pDevExt->EventSpinlock);
1715 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
1716 {
1717 if (pWait->pSession == pSession)
1718 {
1719 fCancelledOne = true;
1720 pWait->fResEvents = UINT32_MAX;
1721 RTListNodeRemove(&pWait->ListNode);
1722#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1723 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
1724#else
1725 rc |= RTSemEventMultiSignal(pWait->Event);
1726 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1727#endif
1728 }
1729 }
1730 if (!fCancelledOne)
1731 pSession->fPendingCancelWaitEvents = true;
1732 RTSpinlockRelease(pDevExt->EventSpinlock);
1733 Assert(rc == 0);
1734 NOREF(rc);
1735
1736#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1737 VbgdCommonWaitDoWakeUps(pDevExt);
1738#endif
1739
1740 return VINF_SUCCESS;
1741}
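
/*
 * Illustrative sketch (not part of the driver): how a ring-3 thread might use this
 * ioctl to kick a sibling thread out of a VBOXGUEST_IOCTL_WAITEVENT loop.  The exact
 * ring-3 entry point is platform specific; the plain ioctl() call below is an
 * assumption for illustration only.
 *
 *     // Controller thread, using the same VBoxGuest file handle / session:
 *     ioctl(hVBoxGuest, VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS, NULL);
 *     // Any WAITEVENT currently blocked in this session returns with
 *     // u32Result == VBOXGUEST_WAITEVENT_INTERRUPTED (VERR_INTERRUPTED); if none
 *     // was in flight, the next WAITEVENT call is interrupted immediately
 *     // (see fPendingCancelWaitEvents above).
 */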
1742
1743
1744/**
1745 * Checks if the VMM request is allowed in the context of the given session.
1746 *
1747 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1748 * @param pSession The calling session.
1749 * @param enmType The request type.
1750 * @param pReqHdr The request.
1751 */
1752static int vbgdCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1753 VMMDevRequestHeader const *pReqHdr)
1754{
1755 /*
1756 * Categorize the request being made.
1757 */
1758 /** @todo This needs quite a bit more work! */
1759 enum
1760 {
1761 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1762 } enmRequired;
1763 switch (enmType)
1764 {
1765 /*
1766 * Deny access to anything we don't know or provide specialized I/O controls for.
1767 */
1768#ifdef VBOX_WITH_HGCM
1769 case VMMDevReq_HGCMConnect:
1770 case VMMDevReq_HGCMDisconnect:
1771# ifdef VBOX_WITH_64_BITS_GUESTS
1772 case VMMDevReq_HGCMCall32:
1773 case VMMDevReq_HGCMCall64:
1774# else
1775 case VMMDevReq_HGCMCall:
1776# endif /* VBOX_WITH_64_BITS_GUESTS */
1777 case VMMDevReq_HGCMCancel:
1778 case VMMDevReq_HGCMCancel2:
1779#endif /* VBOX_WITH_HGCM */
1780 case VMMDevReq_SetGuestCapabilities:
1781 default:
1782 enmRequired = kLevel_NoOne;
1783 break;
1784
1785 /*
1786 * There are a few things only this driver can do (and it doesn't use
1787 * the VMMRequest I/O control route anyway, but whatever).
1788 */
1789 case VMMDevReq_ReportGuestInfo:
1790 case VMMDevReq_ReportGuestInfo2:
1791 case VMMDevReq_GetHypervisorInfo:
1792 case VMMDevReq_SetHypervisorInfo:
1793 case VMMDevReq_RegisterPatchMemory:
1794 case VMMDevReq_DeregisterPatchMemory:
1795 case VMMDevReq_GetMemBalloonChangeRequest:
1796 enmRequired = kLevel_OnlyVBoxGuest;
1797 break;
1798
1799 /*
1800 * Trusted users apps only.
1801 */
1802 case VMMDevReq_QueryCredentials:
1803 case VMMDevReq_ReportCredentialsJudgement:
1804 case VMMDevReq_RegisterSharedModule:
1805 case VMMDevReq_UnregisterSharedModule:
1806 case VMMDevReq_WriteCoreDump:
1807 case VMMDevReq_GetCpuHotPlugRequest:
1808 case VMMDevReq_SetCpuHotPlugStatus:
1809 case VMMDevReq_CheckSharedModules:
1810 case VMMDevReq_GetPageSharingStatus:
1811 case VMMDevReq_DebugIsPageShared:
1812 case VMMDevReq_ReportGuestStats:
1813 case VMMDevReq_ReportGuestUserState:
1814 case VMMDevReq_GetStatisticsChangeRequest:
1815 case VMMDevReq_ChangeMemBalloon:
1816 enmRequired = kLevel_TrustedUsers;
1817 break;
1818
1819 /*
1820 * Anyone.
1821 */
1822 case VMMDevReq_GetMouseStatus:
1823 case VMMDevReq_SetMouseStatus:
1824 case VMMDevReq_SetPointerShape:
1825 case VMMDevReq_GetHostVersion:
1826 case VMMDevReq_Idle:
1827 case VMMDevReq_GetHostTime:
1828 case VMMDevReq_SetPowerStatus:
1829 case VMMDevReq_AcknowledgeEvents:
1830 case VMMDevReq_CtlGuestFilterMask:
1831 case VMMDevReq_ReportGuestCapabilities:
1832 case VMMDevReq_GetDisplayChangeRequest:
1833 case VMMDevReq_VideoModeSupported:
1834 case VMMDevReq_GetHeightReduction:
1835 case VMMDevReq_GetDisplayChangeRequest2:
1836 case VMMDevReq_VideoModeSupported2:
1837 case VMMDevReq_VideoAccelEnable:
1838 case VMMDevReq_VideoAccelFlush:
1839 case VMMDevReq_VideoSetVisibleRegion:
1840 case VMMDevReq_GetDisplayChangeRequestEx:
1841 case VMMDevReq_GetSeamlessChangeRequest:
1842 case VMMDevReq_GetVRDPChangeRequest:
1843 case VMMDevReq_LogString:
1844 case VMMDevReq_GetSessionId:
1845 enmRequired = kLevel_AllUsers;
1846 break;
1847
1848 /*
1849 * Depends on the request parameters...
1850 */
1851 /** @todo this has to be changed into an I/O control and the facilities
1852 * tracked in the session so they can automatically be failed when the
1853 * session terminates without reporting the new status.
1854 *
1855 * The information presented by IGuest is not reliable without this! */
1856 case VMMDevReq_ReportGuestStatus:
1857 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1858 {
1859 case VBoxGuestFacilityType_All:
1860 case VBoxGuestFacilityType_VBoxGuestDriver:
1861 enmRequired = kLevel_OnlyVBoxGuest;
1862 break;
1863 case VBoxGuestFacilityType_VBoxService:
1864 enmRequired = kLevel_TrustedUsers;
1865 break;
1866 case VBoxGuestFacilityType_VBoxTrayClient:
1867 case VBoxGuestFacilityType_Seamless:
1868 case VBoxGuestFacilityType_Graphics:
1869 default:
1870 enmRequired = kLevel_AllUsers;
1871 break;
1872 }
1873 break;
1874 }
1875
1876 /*
1877 * Check against the session.
1878 */
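    /* Note: kLevel_OnlyVBoxGuest and kLevel_OnlyKernel are both satisfied by any
       ring-0 session (R0Process == NIL_RTR0PROCESS), and kLevel_TrustedUsers is
       currently treated exactly like kLevel_AllUsers; no per-user trust check is
       enforced here yet (see the @todo above). */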
1879 switch (enmRequired)
1880 {
1881 default:
1882 case kLevel_NoOne:
1883 break;
1884 case kLevel_OnlyVBoxGuest:
1885 case kLevel_OnlyKernel:
1886 if (pSession->R0Process == NIL_RTR0PROCESS)
1887 return VINF_SUCCESS;
1888 break;
1889 case kLevel_TrustedUsers:
1890 case kLevel_AllUsers:
1891 return VINF_SUCCESS;
1892 }
1893
1894 return VERR_PERMISSION_DENIED;
1895}
1896
1897static int vbgdIoCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1898 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
1899{
1900 int rc;
1901 VMMDevRequestHeader *pReqCopy;
1902
1903 /*
1904 * Validate the header and request size.
1905 */
1906 const VMMDevRequestType enmType = pReqHdr->requestType;
1907 const uint32_t cbReq = pReqHdr->size;
1908 const uint32_t cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);
1909
1910 LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));
1911
1912 if (cbReq < cbMinSize)
1913 {
1914 LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
1915 cbReq, cbMinSize, enmType));
1916 return VERR_INVALID_PARAMETER;
1917 }
1918 if (cbReq > cbData)
1919 {
1920 LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1921 cbData, cbReq, enmType));
1922 return VERR_INVALID_PARAMETER;
1923 }
1924 rc = VbglGRVerify(pReqHdr, cbData);
1925 if (RT_FAILURE(rc))
1926 {
1927 Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
1928 cbData, cbReq, enmType, rc));
1929 return rc;
1930 }
1931
1932 rc = vbgdCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
1933 if (RT_FAILURE(rc))
1934 {
1935 Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
1936 return rc;
1937 }
1938
1939 /*
1940 * Make a copy of the request in the physical memory heap so
1941 * the VBoxGuestLibrary can more easily deal with the request.
1942 * (This is really a waste of time since the OS or the OS specific
1943 * code has already buffered or locked the input/output buffer, but
1944 * it does make things a bit simpler wrt the physical address.)
1945 */
1946 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1947 if (RT_FAILURE(rc))
1948 {
1949 Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1950 cbReq, cbReq, rc));
1951 return rc;
1952 }
1953 memcpy(pReqCopy, pReqHdr, cbReq);
1954
1955 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1956 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1957
1958 rc = VbglGRPerform(pReqCopy);
1959 if ( RT_SUCCESS(rc)
1960 && RT_SUCCESS(pReqCopy->rc))
1961 {
1962 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1963 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1964
1965 memcpy(pReqHdr, pReqCopy, cbReq);
1966 if (pcbDataReturned)
1967 *pcbDataReturned = cbReq;
1968 }
1969 else if (RT_FAILURE(rc))
1970 Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1971 else
1972 {
1973 Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1974 rc = pReqCopy->rc;
1975 }
1976
1977 VbglGRFree(pReqCopy);
1978 return rc;
1979}
1980
1981
1982#ifdef VBOX_WITH_HGCM
1983
1984AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1985
1986/** Worker for vbgdHgcmAsyncWaitCallback*. */
1987static int vbgdHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
1988 bool fInterruptible, uint32_t cMillies)
1989{
1990 int rc;
1991
1992 /*
1993 * Check to see if the condition was met by the time we got here.
1994 *
1995 * We create a simple poll loop here for dealing with out-of-memory
1996 * conditions since the caller isn't necessarily able to deal with
1997 * us returning too early.
1998 */
1999 PVBOXGUESTWAIT pWait;
2000 for (;;)
2001 {
2002 RTSpinlockAcquire(pDevExt->EventSpinlock);
2003 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2004 {
2005 RTSpinlockRelease(pDevExt->EventSpinlock);
2006 return VINF_SUCCESS;
2007 }
2008 RTSpinlockRelease(pDevExt->EventSpinlock);
2009
2010 pWait = vbgdWaitAlloc(pDevExt, NULL);
2011 if (pWait)
2012 break;
2013 if (fInterruptible)
2014 return VERR_INTERRUPTED;
2015 RTThreadSleep(1);
2016 }
2017 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
2018 pWait->pHGCMReq = pHdr;
2019
2020 /*
2021 * Re-enter the spinlock and re-check for the condition.
2022 * If the condition is met, return.
2023 * Otherwise link us into the HGCM wait list and go to sleep.
2024 */
2025 RTSpinlockAcquire(pDevExt->EventSpinlock);
2026 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
2027 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2028 {
2029 vbgdWaitFreeLocked(pDevExt, pWait);
2030 RTSpinlockRelease(pDevExt->EventSpinlock);
2031 return VINF_SUCCESS;
2032 }
2033 RTSpinlockRelease(pDevExt->EventSpinlock);
2034
2035 if (fInterruptible)
2036 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
2037 else
2038 rc = RTSemEventMultiWait(pWait->Event, cMillies);
2039 if (rc == VERR_SEM_DESTROYED)
2040 return rc;
2041
2042 /*
2043 * Unlink, free and return.
2044 */
2045 if ( RT_FAILURE(rc)
2046 && rc != VERR_TIMEOUT
2047 && ( !fInterruptible
2048 || rc != VERR_INTERRUPTED))
2049 LogRel(("vbgdHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));
2050
2051 vbgdWaitFreeUnlocked(pDevExt, pWait);
2052 return rc;
2053}
2054
2055
2056/**
2057 * This is a callback for dealing with async waits.
2058 *
2059 * It operates in a manner similar to vbgdIoCtl_WaitEvent.
2060 */
2061static DECLCALLBACK(int) vbgdHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2062{
2063 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2064 LogFlow(("vbgdHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2065 return vbgdHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2066 false /* fInterruptible */, u32User /* cMillies */);
2067}
2068
2069
2070/**
2071 * This is a callback for dealing with async waits with a timeout.
2072 *
2073 * It operates in a manner similar to vbgdIoCtl_WaitEvent.
2074 */
2075static DECLCALLBACK(int) vbgdHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2076{
2077 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2078 LogFlow(("vbgdHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2079 return vbgdHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2080 true /* fInterruptible */, u32User /* cMillies */);
2081}
2082
2083
2084static int vbgdIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2085 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
2086{
2087 int rc;
2088
2089 /*
2090 * The VbglHGCMConnect call will invoke the callback if the HGCM
2091 * call is performed in an ASYNC fashion. The function is not able
2092 * to deal with cancelled requests.
2093 */
2094 Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
2095 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
2096 ? pInfo->Loc.u.host.achName : "<not local host>"));
2097
2098 rc = VbglR0HGCMInternalConnect(pInfo, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2099 if (RT_SUCCESS(rc))
2100 {
2101 Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
2102 pInfo->u32ClientID, pInfo->result, rc));
2103 if (RT_SUCCESS(pInfo->result))
2104 {
2105 /*
2106 * Append the client id to the client id table.
2107 * If the table has somehow become filled up, we'll disconnect the client again.
2108 */
2109 unsigned i;
2110 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2111 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2112 if (!pSession->aHGCMClientIds[i])
2113 {
2114 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
2115 break;
2116 }
2117 RTSpinlockRelease(pDevExt->SessionSpinlock);
2118 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2119 {
2120 VBoxGuestHGCMDisconnectInfo Info;
2121 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
2122 Info.result = 0;
2123 Info.u32ClientID = pInfo->u32ClientID;
2124 VbglR0HGCMInternalDisconnect(&Info, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2125 return VERR_TOO_MANY_OPEN_FILES;
2126 }
2127 }
2128 else
2129 rc = pInfo->result;
2130 if (pcbDataReturned)
2131 *pcbDataReturned = sizeof(*pInfo);
2132 }
2133 return rc;
2134}
2135
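/*
 * Illustrative sketch (not part of the driver): filling in a connect request the way
 * a ring-0 user of this ioctl might.  The ioctl name, the direct VbgdCommonIoCtl
 * entry point and the service name are assumptions for illustration only.
 *
 *     VBoxGuestHGCMConnectInfo Info;
 *     memset(&Info, 0, sizeof(Info));
 *     Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
 *     strcpy(Info.Loc.u.host.achName, "VBoxSharedFolders");    // hypothetical service
 *     rc = VbgdCommonIoCtl(VBOXGUEST_IOCTL_HGCM_CONNECT, pDevExt, pSession,
 *                          &Info, sizeof(Info), NULL);
 *     if (RT_SUCCESS(rc) && RT_SUCCESS(Info.result))
 *     {
 *         // Info.u32ClientID is now tracked in pSession->aHGCMClientIds and must
 *         // eventually be released via VBOXGUEST_IOCTL_HGCM_DISCONNECT.
 *     }
 */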
2136
2137static int vbgdIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2138 VBoxGuestHGCMDisconnectInfo *pInfo, size_t *pcbDataReturned)
2139{
2140 /*
2141 * Validate the client id and invalidate its entry while we're in the call.
2142 */
2143 int rc;
2144 const uint32_t u32ClientId = pInfo->u32ClientID;
2145 unsigned i;
2146 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2147 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2148 if (pSession->aHGCMClientIds[i] == u32ClientId)
2149 {
2150 pSession->aHGCMClientIds[i] = UINT32_MAX;
2151 break;
2152 }
2153 RTSpinlockRelease(pDevExt->SessionSpinlock);
2154 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2155 {
2156 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: Invalid handle. u32Client=%RX32\n", u32ClientId));
2157 return VERR_INVALID_HANDLE;
2158 }
2159
2160 /*
2161 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
2162 * call is performed in an ASYNC fashion. The function is not able
2163 * to deal with cancelled requests.
2164 */
2165 Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
2166 rc = VbglR0HGCMInternalDisconnect(pInfo, vbgdHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2167 if (RT_SUCCESS(rc))
2168 {
2169 LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
2170 if (pcbDataReturned)
2171 *pcbDataReturned = sizeof(*pInfo);
2172 }
2173
2174 /* Update the client id array according to the result. */
2175 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2176 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2177 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
2178 RTSpinlockRelease(pDevExt->SessionSpinlock);
2179
2180 return rc;
2181}
2182
2183
2184static int vbgdIoCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMCallInfo *pInfo,
2185 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2186 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2187{
2188 const uint32_t u32ClientId = pInfo->u32ClientID;
2189 uint32_t fFlags;
2190 size_t cbActual;
2191 unsigned i;
2192 int rc;
2193
2194 /*
2195 * Some more validations.
2196 */
2197 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2198 {
2199 LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParms=%RX32 is not sane\n", pInfo->cParms));
2200 return VERR_INVALID_PARAMETER;
2201 }
2202
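    /* The ioctl buffer layout is: cbExtra bytes of wrapper (e.g. the timed call
       header), then the VBoxGuestHGCMCallInfo header, then cParms parameter
       structures (32-bit or 64-bit layout depending on f32bit).  cbActual below is
       therefore the minimum acceptable buffer size. */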
2203 cbActual = cbExtra + sizeof(*pInfo);
2204#ifdef RT_ARCH_AMD64
2205 if (f32bit)
2206 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2207 else
2208#endif
2209 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2210 if (cbData < cbActual)
2211 {
2212 LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2213 cbData, cbData, cbActual, cbActual));
2214 return VERR_INVALID_PARAMETER;
2215 }
2216
2217 /*
2218 * Validate the client id.
2219 */
2220 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2221 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2222 if (pSession->aHGCMClientIds[i] == u32ClientId)
2223 break;
2224 RTSpinlockRelease(pDevExt->SessionSpinlock);
2225 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2226 {
2227 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2228 return VERR_INVALID_HANDLE;
2229 }
2230
2231 /*
2232 * The VbglHGCMCall call will invoke the callback if the HGCM
2233 * call is performed in an ASYNC fashion. This function can
2234 * deal with cancelled requests, so we let user more requests
2235 * be interruptible (should add a flag for this later I guess).
2236 */
2237 LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2238 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2239 uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
2240#ifdef RT_ARCH_AMD64
2241 if (f32bit)
2242 {
2243 if (fInterruptible)
2244 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2245 else
2246 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallback, pDevExt, cMillies);
2247 }
2248 else
2249#endif
2250 {
2251 if (fInterruptible)
2252 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2253 else
2254 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vbgdHgcmAsyncWaitCallback, pDevExt, cMillies);
2255 }
2256 if (RT_SUCCESS(rc))
2257 {
2258 LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", pInfo->result));
2259 if (pcbDataReturned)
2260 *pcbDataReturned = cbActual;
2261 }
2262 else
2263 {
2264 if ( rc != VERR_INTERRUPTED
2265 && rc != VERR_TIMEOUT)
2266 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2267 else
2268 Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2269 }
2270 return rc;
2271}
2272
2273#endif /* VBOX_WITH_HGCM */
2274
2275/**
2276 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2277 *
2278 * Ask the host for the size of the balloon and try to set it accordingly. If
2279 * this approach fails because it's not supported, return with fHandleInR3 set
2280 * and let the user land supply memory we can lock via the other ioctl.
2281 *
2282 * @returns VBox status code.
2283 *
2284 * @param pDevExt The device extension.
2285 * @param pSession The session.
2286 * @param pInfo The output buffer.
2287 * @param pcbDataReturned Where to store the amount of returned data. Can
2288 * be NULL.
2289 */
2290static int vbgdIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2291 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2292{
2293 VMMDevGetMemBalloonChangeRequest *pReq;
2294 int rc;
2295
2296 LogFlow(("VBOXGUEST_IOCTL_CHECK_BALLOON:\n"));
2297 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2298 AssertRCReturn(rc, rc);
2299
2300 /*
2301 * The first user trying to query/change the balloon becomes the
2302 * owner and owns it until the session is closed (vbgdCloseMemBalloon).
2303 */
2304 if ( pDevExt->MemBalloon.pOwner != pSession
2305 && pDevExt->MemBalloon.pOwner == NULL)
2306 pDevExt->MemBalloon.pOwner = pSession;
2307
2308 if (pDevExt->MemBalloon.pOwner == pSession)
2309 {
2310 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2311 if (RT_SUCCESS(rc))
2312 {
2313 /*
2314 * This is a response to the balloon change request event. Setting this bit means that
2315 * we request the value from the host and change the guest memory
2316 * balloon according to this value.
2317 */
2318 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2319 rc = VbglGRPerform(&pReq->header);
2320 if (RT_SUCCESS(rc))
2321 {
2322 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2323 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2324
2325 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2326 pInfo->fHandleInR3 = false;
2327
2328 rc = vbgdSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2329 /* Ignore various out of memory failures. */
2330 if ( rc == VERR_NO_MEMORY
2331 || rc == VERR_NO_PHYS_MEMORY
2332 || rc == VERR_NO_CONT_MEMORY)
2333 rc = VINF_SUCCESS;
2334
2335 if (pcbDataReturned)
2336 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2337 }
2338 else
2339 LogRel(("VBOXGUEST_IOCTL_CHECK_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2340 VbglGRFree(&pReq->header);
2341 }
2342 }
2343 else
2344 rc = VERR_PERMISSION_DENIED;
2345
2346 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2347 LogFlow(("VBOXGUEST_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
2348 return rc;
2349}
2350
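/*
 * Note on the two-ioctl balloon protocol implemented by the handlers above and below:
 * VBOXGUEST_IOCTL_CHECK_BALLOON asks the host for the target balloon size and first
 * tries to inflate/deflate entirely in ring-0.  If that is not possible on this
 * platform, fHandleInR3 is returned set and ring-3 is expected to allocate/free the
 * chunks itself, reporting each chunk via VBOXGUEST_IOCTL_CHANGE_BALLOON
 * (u64ChunkAddr + fInflate), which vbgdIoCtl_ChangeMemoryBalloon handles.
 */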
2351
2352/**
2353 * Handle a request for changing the memory balloon.
2354 *
2355 * @returns VBox status code.
2356 *
2357 * @param pDevExt The device extension.
2358 * @param pSession The session.
2359 * @param pInfo The change request structure (input).
2360 * @param pcbDataReturned Where to store the amount of returned data. Can
2361 * be NULL.
2362 */
2363static int vbgdIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2364 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2365{
2366 int rc;
2367 LogFlow(("VBOXGUEST_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%#RX64\n", pInfo->fInflate, pInfo->u64ChunkAddr));
2368
2369 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2370 AssertRCReturn(rc, rc);
2371
2372 if (!pDevExt->MemBalloon.fUseKernelAPI)
2373 {
2374 /*
2375 * The first user trying to query/change the balloon becomes the
2376 * owner and owns it until the session is closed (vbgdCloseMemBalloon).
2377 */
2378 if ( pDevExt->MemBalloon.pOwner != pSession
2379 && pDevExt->MemBalloon.pOwner == NULL)
2380 pDevExt->MemBalloon.pOwner = pSession;
2381
2382 if (pDevExt->MemBalloon.pOwner == pSession)
2383 {
2384 rc = vbgdSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2385 if (pcbDataReturned)
2386 *pcbDataReturned = 0;
2387 }
2388 else
2389 rc = VERR_PERMISSION_DENIED;
2390 }
2391 else
2392 rc = VERR_PERMISSION_DENIED;
2393
2394 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2395 return rc;
2396}
2397
2398
2399/**
2400 * Handle a request for writing a core dump of the guest on the host.
2401 *
2402 * @returns VBox status code.
2403 *
2404 * @param pDevExt The device extension.
2405 * @param pInfo The write core dump request (input, carries fFlags).
2406 */
2407static int vbgdIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2408{
2409 VMMDevReqWriteCoreDump *pReq = NULL;
2410 int rc;
2411 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
2412
2413 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2414 if (RT_SUCCESS(rc))
2415 {
2416 pReq->fFlags = pInfo->fFlags;
2417 rc = VbglGRPerform(&pReq->header);
2418 if (RT_FAILURE(rc))
2419 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2420
2421 VbglGRFree(&pReq->header);
2422 }
2423 else
2424 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2425 sizeof(*pReq), sizeof(*pReq), rc));
2426 return rc;
2427}
2428
2429
2430/**
2431 * Guest backdoor logging.
2432 *
2433 * @returns VBox status code.
2434 *
2435 * @param pDevExt The device extension.
2436 * @param pch The log message (need not be NULL terminated).
2437 * @param cbData Size of the buffer.
2438 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param fUserSession Set if the call originates from a user (ring-3) session.
2439 */
2440static int vbgdIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned, bool fUserSession)
2441{
2442 if (pDevExt->fLoggingEnabled)
2443 RTLogBackdoorPrintf("%.*s", cbData, pch);
2444 else if (!fUserSession)
2445 LogRel(("%.*s", cbData, pch));
2446 else
2447 Log(("%.*s", cbData, pch));
2448 if (pcbDataReturned)
2449 *pcbDataReturned = 0;
2450 return VINF_SUCCESS;
2451}
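
/*
 * Illustrative sketch (not part of the driver): logging a string through the guest
 * backdoor.  The ring-3 plumbing (plain ioctl() on the VBoxGuest device) is an
 * assumption for illustration only; the payload is simply the raw message bytes,
 * which need not be zero terminated:
 *
 *     static const char s_szMsg[] = "hello from the guest";
 *     ioctl(hVBoxGuest, VBOXGUEST_IOCTL_LOG(sizeof(s_szMsg) - 1), (void *)s_szMsg);
 */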
2452
2453
2454/** @name Guest Capabilities, Mouse Status and Event Filter
2455 * @{
2456 */
2457
2458/**
2459 * Clears a bit usage tracker (init time).
2460 *
2461 * @param pTracker The tracker to clear.
2462 */
2463static void vbgdBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
2464{
2465 uint32_t iBit;
2466 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2467
2468 for (iBit = 0; iBit < 32; iBit++)
2469 pTracker->acPerBitUsage[iBit] = 0;
2470 pTracker->fMask = 0;
2471}
2472
2473
2474#ifdef VBOX_STRICT
2475/**
2476 * Checks that pTracker->fMask is correct and that the usage values are within
2477 * the valid range.
2478 *
2479 * @param pTracker The tracker.
2480 * @param cMax Max valid usage value.
2481 * @param pszWhat Identifies the tracker in assertions.
2482 */
2483static void vbgdBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
2484{
2485 uint32_t fMask = 0;
2486 uint32_t iBit;
2487 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2488
2489 for (iBit = 0; iBit < 32; iBit++)
2490 if (pTracker->acPerBitUsage[iBit])
2491 {
2492 fMask |= RT_BIT_32(iBit);
2493 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2494 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2495 }
2496
2497 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
2498}
2499#endif
2500
2501
2502/**
2503 * Applies a change to the bit usage tracker.
2504 *
2505 *
2506 * @returns true if the mask changed, false if not.
2507 * @param pTracker The bit usage tracker.
2508 * @param fChanged The bits to change.
2509 * @param fPrevious The previous value of the bits.
2510 * @param cMax The max valid usage value for assertions.
2511 * @param pszWhat Identifies the tracker in assertions.
2512 */
2513static bool vbgdBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
2514 uint32_t cMax, const char *pszWhat)
2515{
2516 bool fGlobalChange = false;
2517 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2518
2519 while (fChanged)
2520 {
2521 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
2522 uint32_t const fBitMask = RT_BIT_32(iBit);
2523 Assert(iBit < 32); Assert(fBitMask & fChanged);
2524
2525 if (fBitMask & fPrevious)
2526 {
2527 pTracker->acPerBitUsage[iBit] -= 1;
2528 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2529 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2530 if (pTracker->acPerBitUsage[iBit] == 0)
2531 {
2532 fGlobalChange = true;
2533 pTracker->fMask &= ~fBitMask;
2534 }
2535 }
2536 else
2537 {
2538 pTracker->acPerBitUsage[iBit] += 1;
2539 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
2540 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2541 if (pTracker->acPerBitUsage[iBit] == 1)
2542 {
2543 fGlobalChange = true;
2544 pTracker->fMask |= fBitMask;
2545 }
2546 }
2547
2548 fChanged &= ~fBitMask;
2549 }
2550
2551#ifdef VBOX_STRICT
2552 vbgdBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
2553#endif
2554 NOREF(pszWhat); NOREF(cMax);
2555 return fGlobalChange;
2556}
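
/*
 * Worked example of the tracker semantics (illustrative only): two sessions, A and B,
 * both enabling bit 3 of a mask.
 *
 *     A sets bit 3:   acPerBitUsage[3] 0 -> 1, fMask gains RT_BIT_32(3), returns true
 *     B sets bit 3:   acPerBitUsage[3] 1 -> 2, fMask unchanged,          returns false
 *     A clears bit 3: acPerBitUsage[3] 2 -> 1, fMask unchanged,          returns false
 *     B clears bit 3: acPerBitUsage[3] 1 -> 0, fMask loses RT_BIT_32(3), returns true
 *
 * The host therefore only needs updating (the 'true' cases) when the first session
 * turns a bit on or the last session turns it off.
 */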
2557
2558
2559/**
2560 * Init and termination worker for resetting the event filter on the host.
2561 *
2562 * @returns VBox status code.
2563 * @param pDevExt The device extension.
2564 * @param fFixedEvents Fixed events (init time).
2565 */
2566static int vbgdResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
2567{
2568 VMMDevCtlGuestFilterMask *pReq;
2569 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2570 if (RT_SUCCESS(rc))
2571 {
2572 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
2573 pReq->u32OrMask = fFixedEvents;
2574 rc = VbglGRPerform(&pReq->header);
2575 if (RT_FAILURE(rc))
2576 LogRelFunc(("failed with rc=%Rrc\n", rc));
2577 VbglGRFree(&pReq->header);
2578 }
2579 return rc;
2580}
2581
2582
2583/**
2584 * Changes the event filter mask for the given session.
2585 *
2586 * This is called in response to VBOXGUEST_IOCTL_CTL_FILTER_MASK as well as to
2587 * do session cleanup.
2588 *
2589 * @returns VBox status code.
2590 * @param pDevExt The device extension.
2591 * @param pSession The session.
2592 * @param fOrMask The events to add.
2593 * @param fNotMask The events to remove.
2594 * @param fSessionTermination Set if we're called by the session cleanup code.
2595 * This tweaks the error handling so we perform
2596 * proper session cleanup even if the host
2597 * misbehaves.
2598 *
2599 * @remarks Takes the session spinlock.
2600 */
2601static int vbgdSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2602 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2603{
2604 VMMDevCtlGuestFilterMask *pReq;
2605 uint32_t fChanged;
2606 uint32_t fPrevious;
2607 int rc;
2608
2609 /*
2610 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2611 */
2612 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2613 if (RT_SUCCESS(rc))
2614 { /* nothing */ }
2615 else if (!fSessionTermination)
2616 {
2617 LogRel(("vbgdSetSessionFilterMask: VbglGRAlloc failure: %Rrc\n", rc));
2618 return rc;
2619 }
2620 else
2621 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2622
2623
2624 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2625
2626 /*
2627 * Apply the changes to the session mask.
2628 */
2629 fPrevious = pSession->fEventFilter;
2630 pSession->fEventFilter |= fOrMask;
2631 pSession->fEventFilter &= fNotMask;
2632
2633 /*
2634 * If anything actually changed, update the global usage counters.
2635 */
2636 fChanged = fPrevious ^ pSession->fEventFilter;
2637 if (fChanged)
2638 {
2639 bool fGlobalChange = vbgdBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
2640 pDevExt->cSessions, "EventFilterTracker");
2641
2642 /*
2643 * If there are global changes, update the event filter on the host.
2644 */
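        /* (fEventFilterHost == UINT32_MAX means "host state unknown", e.g. after a
           failed update below, so the mask is resent even if the tracker mask itself
           is unchanged.) */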
2645 if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
2646 {
2647 Assert(pReq || fSessionTermination);
2648 if (pReq)
2649 {
2650 pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
2651 if (pReq->u32OrMask == pDevExt->fEventFilterHost)
2652 rc = VINF_SUCCESS;
2653 else
2654 {
2655 pDevExt->fEventFilterHost = pReq->u32OrMask;
2656 pReq->u32NotMask = ~pReq->u32OrMask;
2657 rc = VbglGRPerform(&pReq->header);
2658 if (RT_FAILURE(rc))
2659 {
2660 /*
2661 * Failed, roll back (unless it's session termination time).
2662 */
2663 pDevExt->fEventFilterHost = UINT32_MAX;
2664 if (!fSessionTermination)
2665 {
2666 vbgdBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
2667 pDevExt->cSessions, "EventFilterTracker");
2668 pSession->fEventFilter = fPrevious;
2669 }
2670 }
2671 }
2672 }
2673 else
2674 rc = VINF_SUCCESS;
2675 }
2676 }
2677
2678 RTSpinlockRelease(pDevExt->SessionSpinlock);
2679 if (pReq)
2680 VbglGRFree(&pReq->header);
2681 return rc;
2682}
2683
2684
2685/**
2686 * Handle VBOXGUEST_IOCTL_CTL_FILTER_MASK.
2687 *
2688 * @returns VBox status code.
2689 *
2690 * @param pDevExt The device extension.
2691 * @param pSession The session.
2692 * @param pInfo The request.
2693 */
2694static int vbgdIoCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestFilterMaskInfo *pInfo)
2695{
2696 LogFlow(("VBOXGUEST_IOCTL_CTL_FILTER_MASK: or=%#x not=%#x\n", pInfo->u32OrMask, pInfo->u32NotMask));
2697
2698 if ((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
2699 {
2700 Log(("VBOXGUEST_IOCTL_CTL_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u32OrMask, pInfo->u32NotMask));
2701 return VERR_INVALID_PARAMETER;
2702 }
2703
2704 return vbgdSetSessionEventFilter(pDevExt, pSession, pInfo->u32OrMask, pInfo->u32NotMask, false /*fSessionTermination*/);
2705}
2706
2707
2708/**
2709 * Init and termination worker for setting the mouse feature status to zero on the host.
2710 *
2711 * @returns VBox status code.
2712 * @param pDevExt The device extension.
2713 */
2714static int vbgdResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
2715{
2716 VMMDevReqMouseStatus *pReq;
2717 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2718 if (RT_SUCCESS(rc))
2719 {
2720 pReq->mouseFeatures = 0;
2721 pReq->pointerXPos = 0;
2722 pReq->pointerYPos = 0;
2723 rc = VbglGRPerform(&pReq->header);
2724 if (RT_FAILURE(rc))
2725 LogRelFunc(("failed with rc=%Rrc\n", rc));
2726 VbglGRFree(&pReq->header);
2727 }
2728 return rc;
2729}
2730
2731
2732/**
2733 * Changes the mouse status mask for the given session.
2734 *
2735 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
2736 * do session cleanup.
2737 *
2738 * @returns VBox status code.
2739 * @param pDevExt The device extension.
2740 * @param pSession The session.
2741 * @param fOrMask The status flags to add.
2742 * @param fNotMask The status flags to remove.
2743 * @param fSessionTermination Set if we're called by the session cleanup code.
2744 * This tweaks the error handling so we perform
2745 * proper session cleanup even if the host
2746 * misbehaves.
2747 *
2748 * @remarks Takes the session spinlock.
2749 */
2750static int vbgdSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2751 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2752{
2753 VMMDevReqMouseStatus *pReq;
2754 uint32_t fChanged;
2755 uint32_t fPrevious;
2756 int rc;
2757
2758 /*
2759 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2760 */
2761 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2762 if (RT_SUCCESS(rc))
2763 { /* nothing */ }
2764 else if (!fSessionTermination)
2765 {
2766 LogRel(("vbgdSetSessionMouseStatus: VbglGRAlloc failure: %Rrc\n", rc));
2767 return rc;
2768 }
2769 else
2770 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2771
2772
2773 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2774
2775 /*
2776 * Apply the changes to the session mask.
2777 */
2778 fPrevious = pSession->fMouseStatus;
2779 pSession->fMouseStatus |= fOrMask;
2780 pSession->fMouseStatus &= fNotMask;
2781
2782 /*
2783 * If anything actually changed, update the global usage counters.
2784 */
2785 fChanged = fPrevious ^ pSession->fMouseStatus;
2786 if (fChanged)
2787 {
2788 bool fGlobalChange = vbgdBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
2789 pDevExt->cSessions, "MouseStatusTracker");
2790
2791 /*
2792 * If there are global changes, update the event filter on the host.
2793 */
2794 if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
2795 {
2796 Assert(pReq || fSessionTermination);
2797 if (pReq)
2798 {
2799 pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
2800 if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
2801 rc = VINF_SUCCESS;
2802 else
2803 {
2804 pDevExt->fMouseStatusHost = pReq->mouseFeatures;
2805 pReq->pointerXPos = 0;
2806 pReq->pointerYPos = 0;
2807 rc = VbglGRPerform(&pReq->header);
2808 if (RT_FAILURE(rc))
2809 {
2810 /*
2811 * Failed, roll back (unless it's session termination time).
2812 */
2813 pDevExt->fMouseStatusHost = UINT32_MAX;
2814 if (!fSessionTermination)
2815 {
2816 vbgdBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
2817 pDevExt->cSessions, "MouseStatusTracker");
2818 pSession->fMouseStatus = fPrevious;
2819 }
2820 }
2821 }
2822 }
2823 else
2824 rc = VINF_SUCCESS;
2825 }
2826 }
2827
2828 RTSpinlockRelease(pDevExt->SessionSpinlock);
2829 if (pReq)
2830 VbglGRFree(&pReq->header);
2831 return rc;
2832}
2833
2834
2835/**
2836 * Sets the mouse status features for this session and updates them globally.
2837 *
2838 * @returns VBox status code.
2839 *
2840 * @param pDevExt The device extension.
2841 * @param pSession The session.
2842 * @param fFeatures New bitmap of enabled features.
2843 */
2844static int vbgdIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2845{
2846 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
2847
2848 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
2849 return VERR_INVALID_PARAMETER;
2850
2851 return vbgdSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
2852}
2853
2854
2855/**
2856 * Returns the mask of VMM device events that this session is allowed to see
2857 * (wrt. "acquire" mode guest capabilities).
2858 *
2859 * The events associated with guest capabilities in "acquire" mode will be
2860 * restricted to sessions which have acquired the respective capabilities.
2861 * If someone else tries to wait for acquired events, they won't be woken up
2862 * when the event becomes pending. Should some other thread in the session
2863 * acquire the capability while the corresponding event is pending, the
2864 * waiting thread will be woken up.
2865 *
2866 * @returns Mask of events valid for the given session.
2867 * @param pDevExt The device extension.
2868 * @param pSession The session.
2869 *
2870 * @remarks Only needs to be called when dispatching events in the
2871 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
2872 */
2873static uint32_t vbgdGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2874{
2875 uint32_t fAcquireModeGuestCaps;
2876 uint32_t fAcquiredGuestCaps;
2877 uint32_t fAllowedEvents;
2878
2879 /*
2880 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
2881 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
2882 */
2883 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
2884 if (fAcquireModeGuestCaps == 0)
2885 return VMMDEV_EVENT_VALID_EVENT_MASK;
2886 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
2887
2888 /*
2889 * Calculate which events to allow according to the cap config and caps
2890 * acquired by the session.
2891 */
2892 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
2893 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
2894 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
2895 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
2896
2897 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
2898 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
2899 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
2900
2901 return fAllowedEvents;
2902}
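
/*
 * Concrete example (illustrative only): if VMMDEV_GUEST_SUPPORTS_GRAPHICS has been
 * switched into "acquire" mode but this session has not acquired it, the function
 * above strips VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST from the mask, so a WAITEVENT in
 * this session is not woken up by display change events; only the session owning the
 * capability sees them.
 */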
2903
2904
2905/**
2906 * Init and termination worker for setting the guest capabilities to zero on the host.
2907 *
2908 * @returns VBox status code.
2909 * @param pDevExt The device extension.
2910 */
2911static int vbgdResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
2912{
2913 VMMDevReqGuestCapabilities2 *pReq;
2914 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
2915 if (RT_SUCCESS(rc))
2916 {
2917 pReq->u32NotMask = UINT32_MAX;
2918 pReq->u32OrMask = 0;
2919 rc = VbglGRPerform(&pReq->header);
2920
2921 if (RT_FAILURE(rc))
2922 LogRelFunc(("failed with rc=%Rrc\n", rc));
2923 VbglGRFree(&pReq->header);
2924 }
2925 return rc;
2926}
2927
2928
2929/**
2930 * Sets the guest capabilities on the host while holding the lock.
2931 *
2932 * This will ASSUME that we're the ones in charge of the mask, so
2933 * we'll simply clear all bits we don't set.
2934 *
2935 * @returns VBox status code.
2936 * @param pDevExt The device extension.
 * @param pReq The preallocated SetGuestCapabilities request to use.
2937 */
2938static int vbgdUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
2939{
2940 int rc;
2941
2942 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
2943 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
2944 rc = VINF_SUCCESS;
2945 else
2946 {
2947 pDevExt->fGuestCapsHost = pReq->u32OrMask;
2948 pReq->u32NotMask = ~pReq->u32OrMask;
2949 rc = VbglGRPerform(&pReq->header);
2950 if (RT_FAILURE(rc))
2951 pDevExt->fGuestCapsHost = UINT32_MAX;
2952 }
2953
2954 return rc;
2955}
2956
2957
2958/**
2959 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
2960 * the given session.
2961 *
2962 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
2963 * to do session cleanup.
2964 *
2965 * @returns VBox status code.
2966 * @param pDevExt The device extension.
2967 * @param pSession The session.
2968 * @param fOrMask The capabilities to add .
2969 * @param fNotMask The capabilities to remove. Ignored in
2970 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
2971 * @param enmFlags Confusing operation modifier.
2972 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
2973 * configure and acquire/release the capabilities.
2974 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
2975 * means only configure capabilities in the
2976 * @a fOrMask capabilities for "acquire" mode.
2977 * @param fSessionTermination Set if we're called by the session cleanup code.
2978 * This tweaks the error handling so we perform
2979 * proper session cleanup even if the host
2980 * misbehaves.
2981 *
2982 * @remarks Takes both the session and event spinlocks.
2983 */
2984static int vbgdAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2985 uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags,
2986 bool fSessionTermination)
2987{
2988 uint32_t fCurrentOwnedCaps;
2989 uint32_t fSessionRemovedCaps;
2990 uint32_t fSessionAddedCaps;
2991 uint32_t fOtherConflictingCaps;
2992 VMMDevReqGuestCapabilities2 *pReq = NULL;
2993 int rc;
2994
2995
2996 /*
2997 * Validate and adjust input.
2998 */
2999 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3000 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3001 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3002 {
3003 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x -- invalid fOrMask\n",
3004 pSession, fOrMask, fNotMask, enmFlags));
3005 return VERR_INVALID_PARAMETER;
3006 }
3007
3008 if ( enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3009 && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
3010 {
3011 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: invalid enmFlags %d\n",
3012 pSession, fOrMask, fNotMask, enmFlags));
3013 return VERR_INVALID_PARAMETER;
3014 }
3015 Assert(!fOrMask || !fSessionTermination);
3016
3017 /* The fNotMask need not contain only valid values; invalid ones will simply be ignored. */
3018 fNotMask &= ~fOrMask;
3019
3020 /*
3021 * Preallocate an update request if we're about to do more than just configure
3022 * the capability mode.
3023 */
3024 if (enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
3025 {
3026 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3027 if (RT_SUCCESS(rc))
3028 { /* do nothing */ }
3029 else if (!fSessionTermination)
3030 {
3031 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: VbglGRAlloc failure: %Rrc\n",
3032 pSession, fOrMask, fNotMask, enmFlags, rc));
3033 return rc;
3034 }
3035 else
3036 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3037 }
3038
3039 /*
3040 * Try to switch the capabilities in the OR mask into "acquire" mode.
3041 *
3042 * Note! We currently ignore anyone who may already have "set" the capabilities
3043 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3044 */
3045 RTSpinlockAcquire(pDevExt->EventSpinlock);
3046
3047 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3048 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3049 else
3050 {
3051 RTSpinlockRelease(pDevExt->EventSpinlock);
3052
3053 if (pReq)
3054 VbglGRFree(&pReq->header);
3055 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3056 LogRel(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: calling caps acquire for set caps\n",
3057 pSession, fOrMask, fNotMask, enmFlags));
3058 return VERR_INVALID_STATE;
3059 }
3060
3061 /*
3062 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3063 */
3064 if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
3065 {
3066 RTSpinlockRelease(pDevExt->EventSpinlock);
3067
3068 Assert(!pReq);
3069 Log(("vbgdAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x enmFlags=%#x: configured acquire caps: 0x%x\n",
3070 pSession, fOrMask, fNotMask, enmFlags));
3071 return VINF_SUCCESS;
3072 }
3073 Assert(pReq || fSessionTermination);
3074
3075 /*
3076 * Caller wants to acquire/release the capabilities too.
3077 *
3078 * Note! The mode change of the capabilities above won't be reverted on
3079 * failure, this is intentional.
3080 */
3081 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3082 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3083 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3084 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3085 fOtherConflictingCaps &= fSessionAddedCaps;
3086
3087 if (!fOtherConflictingCaps)
3088 {
3089 if (fSessionAddedCaps)
3090 {
3091 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3092 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3093 }
3094
3095 if (fSessionRemovedCaps)
3096 {
3097 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3098 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3099 }
3100
3101 /*
3102 * If something changes (which is very likely), tell the host.
3103 */
3104 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3105 {
3106 Assert(pReq || fSessionTermination);
3107 if (pReq)
3108 {
3109 rc = vbgdUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3110 if (RT_FAILURE(rc) && !fSessionTermination)
3111 {
3112 /* Failed, roll back. */
3113 if (fSessionAddedCaps)
3114 {
3115 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3116 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3117 }
3118 if (fSessionRemovedCaps)
3119 {
3120 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3121 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3122 }
3123
3124 RTSpinlockRelease(pDevExt->EventSpinlock);
3125 LogRel(("vbgdAcquireSessionCapabilities: vbgdUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3126 VbglGRFree(&pReq->header);
3127 return rc;
3128 }
3129 }
3130 }
3131 }
3132 else
3133 {
3134 RTSpinlockRelease(pDevExt->EventSpinlock);
3135
3136 Log(("vbgdAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3137 VbglGRFree(&pReq->header);
3138 return VERR_RESOURCE_BUSY;
3139 }
3140
3141 RTSpinlockRelease(pDevExt->EventSpinlock);
3142 if (pReq)
3143 VbglGRFree(&pReq->header);
3144
3145 /*
3146 * If we added a capability, check if that means some other thread in our
3147 * session should be unblocked because there are events pending.
3148 *
3149 * HACK ALERT! When the seamless support capability is added we generate a
3150 * seamless change event so that the ring-3 client can sync with
3151 * the seamless state. Although this introduces a spurious
3152 * wakeups of the ring-3 client, it solves the problem of client
3153 * state inconsistency in multiuser environment (on Windows).
3154 */
3155 if (fSessionAddedCaps)
3156 {
3157 uint32_t fGenFakeEvents = 0;
3158 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3159 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3160
3161 RTSpinlockAcquire(pDevExt->EventSpinlock);
3162 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3163 vbgdDispatchEventsLocked(pDevExt, fGenFakeEvents);
3164 RTSpinlockRelease(pDevExt->EventSpinlock);
3165
3166#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3167 VbgdCommonWaitDoWakeUps(pDevExt);
3168#endif
3169 }
3170
3171 return VINF_SUCCESS;
3172}
3173
3174
3175/**
3176 * Handle VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE.
3177 *
3178 * @returns VBox status code.
3179 *
3180 * @param pDevExt The device extension.
3181 * @param pSession The session.
3182 * @param pAcquire The request.
3183 */
3184static int vbgdIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
3185{
3186 int rc;
3187 LogFlow(("VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE: or=%#x not=%#x flags=%#x\n",
3188 pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags));
3189
3190 rc = vbgdAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags,
3191 false /*fSessionTermination*/);
3192 if (RT_FAILURE(rc))
3193 LogRel(("VbgdCommonIoCtl: GUEST_CAPS_ACQUIRE failed rc=%Rrc\n", rc));
3194 pAcquire->rc = rc;
3195 return VINF_SUCCESS;
3196}
3197
3198
3199/**
3200 * Sets the guest capabilities for a session.
3201 *
3202 * @returns VBox status code.
3203 * @param pDevExt The device extension.
3204 * @param pSession The session.
3205 * @param fOrMask The capabilities to add.
3206 * @param fNotMask The capabilities to remove.
3207 * @param fSessionTermination Set if we're called by the session cleanup code.
3208 * This tweaks the error handling so we perform
3209 * proper session cleanup even if the host
3210 * misbehaves.
3211 *
3212 * @remarks Takes the session spinlock.
3213 */
3214static int vbgdSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3215 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
3216{
3217 /*
3218 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3219 */
3220 VMMDevReqGuestCapabilities2 *pReq;
3221 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3222 if (RT_SUCCESS(rc))
3223 { /* nothing */ }
3224 else if (!fSessionTermination)
3225 {
3226 LogRel(("vbgdSetSessionCapabilities: VbglGRAlloc failure: %Rrc\n", rc));
3227 return rc;
3228 }
3229 else
3230 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3231
3232
3233 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3234
3235#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3236 /*
3237 * Capabilities in "acquire" mode cannot be set via this API.
3238 * (Acquire mode is only used on windows at the time of writing.)
3239 */
3240 if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
3241#endif
3242 {
3243 /*
3244 * Apply the changes to the session mask.
3245 */
3246 uint32_t fChanged;
3247 uint32_t fPrevious = pSession->fCapabilities;
3248 pSession->fCapabilities |= fOrMask;
3249 pSession->fCapabilities &= ~fNotMask;
3250
3251 /*
3252 * If anything actually changed, update the global usage counters.
3253 */
3254 fChanged = fPrevious ^ pSession->fCapabilities;
3255 if (fChanged)
3256 {
3257 bool fGlobalChange = vbgdBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
3258 pDevExt->cSessions, "SetGuestCapsTracker");
3259
3260 /*
3261 * If there are global changes, update the capabilities on the host.
3262 */
3263 if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
3264 {
3265 Assert(pReq || fSessionTermination);
3266 if (pReq)
3267 {
3268 rc = vbgdUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3269
3270 /* On failure, roll back (unless it's session termination time). */
3271 if (RT_FAILURE(rc) && !fSessionTermination)
3272 {
3273 vbgdBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
3274 pDevExt->cSessions, "SetGuestCapsTracker");
3275 pSession->fCapabilities = fPrevious;
3276 }
3277 }
3278 }
3279 }
3280 }
3281#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3282 else
3283 rc = VERR_RESOURCE_BUSY;
3284#endif
3285
3286 RTSpinlockRelease(pDevExt->SessionSpinlock);
3287 if (pReq)
3288 VbglGRFree(&pReq->header);
3289 return rc;
3290}
3291
3292
3293/**
3294 * Handle VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES.
3295 *
3296 * @returns VBox status code.
3297 *
3298 * @param pDevExt The device extension.
3299 * @param pSession The session.
3300 * @param pInfo The request.
3301 */
3302static int vbgdIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestSetCapabilitiesInfo *pInfo)
3303{
3304 int rc;
3305 LogFlow(("VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u32OrMask, pInfo->u32NotMask));
3306
3307 if (!((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3308 rc = vbgdSetSessionCapabilities(pDevExt, pSession, pInfo->u32OrMask, pInfo->u32NotMask, false /*fSessionTermination*/);
3309 else
3310 rc = VERR_INVALID_PARAMETER;
3311
3312 return rc;
3313}
3314
3315/** @} */
3316
3317
3318/**
3319 * Common IOCtl for user to kernel and kernel to kernel communication.
3320 *
3321 * This function only does the basic validation and then invokes
3322 * worker functions that takes care of each specific function.
3323 *
3324 * @returns VBox status code.
3325 *
3326 * @param iFunction The requested function.
3327 * @param pDevExt The device extension.
3328 * @param pSession The client session.
3329 * @param pvData The input/output data buffer. Can be NULL depending on the function.
3330 * @param cbData The max size of the data buffer.
3331 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
3332 */
3333int VbgdCommonIoCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3334 void *pvData, size_t cbData, size_t *pcbDataReturned)
3335{
3336 int rc;
3337 LogFlow(("VbgdCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
3338 iFunction, pDevExt, pSession, pvData, cbData));
3339
3340 /*
3341 * Make sure the returned data size is set to zero.
3342 */
3343 if (pcbDataReturned)
3344 *pcbDataReturned = 0;
3345
3346 /*
3347 * Define some helper macros to simplify validation.
3348 */
3349#define CHECKRET_RING0(mnemonic) \
3350 do { \
3351 if (pSession->R0Process != NIL_RTR0PROCESS) \
3352 { \
3353 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
3354 pSession->Process, (uintptr_t)pSession->R0Process)); \
3355 return VERR_PERMISSION_DENIED; \
3356 } \
3357 } while (0)
3358#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
3359 do { \
3360 if (cbData < (cbMin)) \
3361 { \
3362 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
3363 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
3364 return VERR_BUFFER_OVERFLOW; \
3365 } \
3366 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
3367 { \
3368 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
3369 return VERR_INVALID_POINTER; \
3370 } \
3371 } while (0)
3372#define CHECKRET_SIZE(mnemonic, cb) \
3373 do { \
3374 if (cbData != (cb)) \
3375 { \
3376 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
3377 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
3378 return VERR_BUFFER_OVERFLOW; \
3379 } \
3380 if ((cb) != 0 && !VALID_PTR(pvData)) \
3381 { \
3382 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
3383 return VERR_INVALID_POINTER; \
3384 } \
3385 } while (0)
3386
3387
3388 /*
3389 * Deal with variably sized requests first.
3390 */
3391 rc = VINF_SUCCESS;
3392 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
3393 {
3394 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
3395 rc = vbgdIoCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
3396 }
#ifdef VBOX_WITH_HGCM
    /*
     * The HGCM calls are a bit trickier: the variants below differ in timeout,
     * interruptibility, whether the parameters may reference user memory, and
     * (on AMD64) whether the request comes from a 32-bit client.
     */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
    {
        bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                fInterruptible, false /*f32bit*/, false /* fUserData */,
                                0, cbData, pcbDataReturned);
    }
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
    {
        VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
        CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
        rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
                                !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
                                false /*f32bit*/, false /* fUserData */,
                                RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
    }
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
    {
        bool fInterruptible = true;
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                fInterruptible, false /*f32bit*/, true /* fUserData */,
                                0, cbData, pcbDataReturned);
    }
# ifdef RT_ARCH_AMD64
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
    {
        bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                fInterruptible, true /*f32bit*/, false /* fUserData */,
                                0, cbData, pcbDataReturned);
    }
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
        VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
        rc = vbgdIoCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
                                !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
                                true /*f32bit*/, false /* fUserData */,
                                RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
    }
# endif
#endif /* VBOX_WITH_HGCM */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
    {
        CHECKRET_MIN_SIZE("LOG", 1);
        rc = vbgdIoCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned, pSession->fUserSession);
    }
    else
    {
        switch (iFunction)
        {
            case VBOXGUEST_IOCTL_GETVMMDEVPORT:
                CHECKRET_RING0("GETVMMDEVPORT");
                CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
                rc = vbgdIoCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
                break;

#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
            case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
                CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
                CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
                rc = vbgdIoCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
                break;
#endif

            case VBOXGUEST_IOCTL_WAITEVENT:
                CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
                rc = vbgdIoCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
                                         pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
                break;

            case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
                CHECKRET_SIZE("CANCEL_ALL_WAITEVENTS", 0);
                rc = vbgdIoCtl_CancelAllWaitEvents(pDevExt, pSession);
                break;

            case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
                CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
                rc = vbgdIoCtl_CtlFilterMask(pDevExt, pSession, (VBoxGuestFilterMaskInfo *)pvData);
                break;

#ifdef VBOX_WITH_HGCM
            case VBOXGUEST_IOCTL_HGCM_CONNECT:
# ifdef RT_ARCH_AMD64
            case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
# endif
                CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
                rc = vbgdIoCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
# ifdef RT_ARCH_AMD64
            case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
# endif
                CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
                rc = vbgdIoCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
                break;
#endif /* VBOX_WITH_HGCM */

            case VBOXGUEST_IOCTL_CHECK_BALLOON:
                CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
                rc = vbgdIoCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_CHANGE_BALLOON:
                CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
                rc = vbgdIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
                CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
                rc = vbgdIoCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
                break;

            case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
                CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
                rc = vbgdIoCtl_SetMouseStatus(pDevExt, pSession, *(uint32_t *)pvData);
                break;

#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
            case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
                CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
                rc = VbgdNtIOCtl_DpcLatencyChecker();
                break;
#endif

            case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
                CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
                rc = vbgdIoCtl_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire *)pvData);
                if (pcbDataReturned) /* may be NULL, see the function docs */
                    *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
                break;

            case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
                CHECKRET_MIN_SIZE("SET_GUEST_CAPABILITIES", sizeof(VBoxGuestSetCapabilitiesInfo));
                rc = vbgdIoCtl_SetCapabilities(pDevExt, pSession, (VBoxGuestSetCapabilitiesInfo *)pvData);
                break;

            default:
            {
                LogRel(("VbgdCommonIoCtl: Unknown request iFunction=%#x stripped size=%#x\n",
                        iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
                rc = VERR_NOT_SUPPORTED;
                break;
            }
        }
    }

    LogFlow(("VbgdCommonIoCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
    return rc;
}
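
/*
 * Illustrative sketch (not part of the driver): a platform glue layer that has
 * already set up pDevExt and pSession could route a log string through the
 * common ioctl path like this.  Encoding the buffer size into
 * VBOXGUEST_IOCTL_LOG() is an assumption based on how the dispatcher above
 * strips the size from the function code.
 *
 *     static const char s_szMsg[] = "VBoxGuest: hello from the glue layer\n";
 *     size_t cbReturned = 0;
 *     int rc = VbgdCommonIoCtl(VBOXGUEST_IOCTL_LOG(sizeof(s_szMsg)), pDevExt, pSession,
 *                              (void *)s_szMsg, sizeof(s_szMsg), &cbReturned);
 */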


/**
 * Used by VbgdCommonISR as well as the acquire guest capability code.
 *
 * @returns VINF_SUCCESS on success.  On failure, the RTSemEventMultiSignal
 *          errors ORed together (processing completes despite errors).
 * @param   pDevExt     The VBoxGuest device extension.
 * @param   fEvents     The events to dispatch.
 */
static int vbgdDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = VINF_SUCCESS;

    fEvents |= pDevExt->f32PendingEvents;

    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
        if (   fHandledEvents != 0
            && !pWait->fResEvents)
        {
            /* Does this one wait on any of the events we're dispatching?  We do a quick
               check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
            if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
                fHandledEvents &= vbgdGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
            if (fHandledEvents)
            {
                pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                fEvents &= ~pWait->fResEvents;
                RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                if (!fEvents)
                    break;
            }
        }
    }

    ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
    return rc;
}
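
/*
 * Worked example (illustrative only): suppose fEvents carries two bits, one of
 * which a waiter registered in pWait->fReqEvents.  fHandledEvents becomes the
 * intersection; if that intersection touches VBOXGUEST_ACQUIRE_STYLE_EVENTS it
 * is additionally filtered through vbgdGetAllowedEventMaskForSession(), so only
 * a session permitted to see the event gets woken.  The bit handed to the
 * waiter is then cleared from fEvents, and whatever is left over is parked in
 * f32PendingEvents for future waiters.
 */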


/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up threads waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VbgdCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    bool fMousePositionChanged = false;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;
            PVBOXGUESTWAIT pSafe;

            Log3(("VbgdCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#ifndef RT_OS_WINDOWS
                if (pDevExt->MouseNotifyCallback.pfnNotify)
                    pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            rc |= vbgdDispatchEventsLocked(pDevExt, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VbgdCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        Log3(("VbgdCommonISR: not ours\n"));

    RTSpinlockRelease(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note: On Windows this isn't possible at this IRQL, so a DPC takes care
     * of it.  The same goes for Darwin, which does it in the work loop callback.
     */
    VbgdCommonWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implement them.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VbgdNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}

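/*
 * Illustrative sketch (not part of this file): the platform-specific interrupt
 * handler is expected to do little more than forward to VbgdCommonISR and
 * report whether the interrupt was ours.  The handler name and its argument
 * convention below are assumptions for illustration only.
 *
 *     static int vbgdExampleNativeIsr(void *pvUser)
 *     {
 *         PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
 *         return VbgdCommonISR(pDevExt) ? 1 : 0;   // 1 = handled, 0 = not ours
 *     }
 */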