VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 70095

Last change on this file since 70095 was 70095, checked in by vboxsync, 7 years ago

VBoxGuest: More host options.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 154.3 KB
Line 
1/* $Id: VBoxGuest.cpp 70095 2017-12-12 18:32:35Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/** @page pg_vbdrv VBoxGuest
28 *
29 * VBoxGuest is the device driver for VMMDev.
30 *
31 * The device driver is shipped as part of the guest additions. It has roots in
32 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
33 * specific code may apply to both drivers.
34 *
35 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
36 * The VBoxGuest.cpp source file shall not contain platform specific code,
37 * though it must occasionally do a few \#ifdef RT_OS_XXX tests to cater for
38 * platform differences. Though, in those cases, it is common that more than
39 * one platform needs special handling.
40 *
41 * On most platforms the device driver should create two device nodes, one for
42 * full (unrestricted) access to the feature set, and one which only provides a
43 * restricted set of functions. These are generally referred to as 'vboxguest'
44 * and 'vboxuser' respectively. Currently, this two-device approach is only
45 * implemented on Linux!
46 *
47 */
48
49
50/*********************************************************************************************************************************
51* Header Files *
52*********************************************************************************************************************************/
53#define LOG_GROUP LOG_GROUP_DEFAULT
54#include "VBoxGuestInternal.h"
55#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
56#include <VBox/log.h>
57#include <VBox/HostServices/GuestPropertySvc.h>
58#include <iprt/ctype.h>
59#include <iprt/mem.h>
60#include <iprt/time.h>
61#include <iprt/memobj.h>
62#include <iprt/asm.h>
63#include <iprt/asm-amd64-x86.h>
64#include <iprt/string.h>
65#include <iprt/process.h>
66#include <iprt/assert.h>
67#include <iprt/param.h>
68#include <iprt/timer.h>
69#ifdef VBOX_WITH_HGCM
70# include <iprt/thread.h>
71#endif
72#include "version-generated.h"
73#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
74# include "revision-generated.h"
75#endif
76#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
77# include <iprt/rand.h>
78#endif
79
80
81/*********************************************************************************************************************************
82* Defined Constants And Macros *
83*********************************************************************************************************************************/
84#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
85
86
87/*********************************************************************************************************************************
88* Internal Functions *
89*********************************************************************************************************************************/
90#ifdef VBOX_WITH_HGCM
91static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
92#endif
93static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
94static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
95static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
96static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
97static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
98static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
99static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
100 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
101static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
102 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
103static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
104 uint32_t fOrMask, uint32_t fNoMask,
105 uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps, bool fSessionTermination);
106static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
107 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags, bool fSessionTermination);
108static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
109
110
111/*********************************************************************************************************************************
112* Global Variables *
113*********************************************************************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the trailing array of
 *  physical page addresses covering one full balloon chunk. */
static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
137
138
139/**
140 * Reserves memory in which the VMM can relocate any guest mappings
141 * that are floating around.
142 *
143 * This operation is a little bit tricky since the VMM might not accept
144 * just any address because of address clashes between the three contexts
145 * it operates in, so use a small stack to perform this operation.
146 *
147 * @returns VBox status code (ignored).
148 * @param pDevExt The device extension.
149 */
150static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
151{
152 /*
153 * Query the required space.
154 */
155 VMMDevReqHypervisorInfo *pReq;
156 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
157 if (RT_FAILURE(rc))
158 return rc;
159 pReq->hypervisorStart = 0;
160 pReq->hypervisorSize = 0;
161 rc = VbglR0GRPerform(&pReq->header);
162 if (RT_FAILURE(rc)) /* this shouldn't happen! */
163 {
164 VbglR0GRFree(&pReq->header);
165 return rc;
166 }
167
168 /*
169 * The VMM will report back if there is nothing it wants to map, like for
170 * instance in VT-x and AMD-V mode.
171 */
172 if (pReq->hypervisorSize == 0)
173 Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
174 else
175 {
176 /*
177 * We have to try several times since the host can be picky
178 * about certain addresses.
179 */
180 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
181 uint32_t cbHypervisor = pReq->hypervisorSize;
182 RTR0MEMOBJ ahTries[5];
183 uint32_t iTry;
184 bool fBitched = false;
185 Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
186 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
187 {
188 /*
189 * Reserve space, or if that isn't supported, create a object for
190 * some fictive physical memory and map that in to kernel space.
191 *
192 * To make the code a bit uglier, most systems cannot help with
193 * 4MB alignment, so we have to deal with that in addition to
194 * having two ways of getting the memory.
195 */
196 uint32_t uAlignment = _4M;
197 RTR0MEMOBJ hObj;
198 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
199 if (rc == VERR_NOT_SUPPORTED)
200 {
201 uAlignment = PAGE_SIZE;
202 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
203 }
204 /*
205 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
206 * not implemented at all at the current platform, try to map the memory object into the
207 * virtual kernel space.
208 */
209 if (rc == VERR_NOT_SUPPORTED)
210 {
211 if (hFictive == NIL_RTR0MEMOBJ)
212 {
213 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
214 if (RT_FAILURE(rc))
215 break;
216 hFictive = hObj;
217 }
218 uAlignment = _4M;
219 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
220 if (rc == VERR_NOT_SUPPORTED)
221 {
222 uAlignment = PAGE_SIZE;
223 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
224 }
225 }
226 if (RT_FAILURE(rc))
227 {
228 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
229 rc, cbHypervisor, uAlignment, iTry));
230 fBitched = true;
231 break;
232 }
233
234 /*
235 * Try set it.
236 */
237 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
238 pReq->header.rc = VERR_INTERNAL_ERROR;
239 pReq->hypervisorSize = cbHypervisor;
240 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
241 if ( uAlignment == PAGE_SIZE
242 && pReq->hypervisorStart & (_4M - 1))
243 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
244 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
245
246 rc = VbglR0GRPerform(&pReq->header);
247 if (RT_SUCCESS(rc))
248 {
249 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
250 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
251 RTR0MemObjAddress(pDevExt->hGuestMappings),
252 RTR0MemObjSize(pDevExt->hGuestMappings),
253 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
254 break;
255 }
256 ahTries[iTry] = hObj;
257 }
258
259 /*
260 * Cleanup failed attempts.
261 */
262 while (iTry-- > 0)
263 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
264 if ( RT_FAILURE(rc)
265 && hFictive != NIL_RTR0PTR)
266 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
267 if (RT_FAILURE(rc) && !fBitched)
268 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
269 }
270 VbglR0GRFree(&pReq->header);
271
272 /*
273 * We ignore failed attempts for now.
274 */
275 return VINF_SUCCESS;
276}
277
278
279/**
280 * Undo what vgdrvInitFixateGuestMappings did.
281 *
282 * @param pDevExt The device extension.
283 */
284static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
285{
286 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
287 {
288 /*
289 * Tell the host that we're going to free the memory we reserved for
290 * it, the free it up. (Leak the memory if anything goes wrong here.)
291 */
292 VMMDevReqHypervisorInfo *pReq;
293 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
294 if (RT_SUCCESS(rc))
295 {
296 pReq->hypervisorStart = 0;
297 pReq->hypervisorSize = 0;
298 rc = VbglR0GRPerform(&pReq->header);
299 VbglR0GRFree(&pReq->header);
300 }
301 if (RT_SUCCESS(rc))
302 {
303 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
304 AssertRC(rc);
305 }
306 else
307 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
308
309 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
310 }
311}
312
313
314
315/**
316 * Report the guest information to the host.
317 *
318 * @returns IPRT status code.
319 * @param enmOSType The OS type to report.
320 */
321static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
322{
323 /*
324 * Allocate and fill in the two guest info reports.
325 */
326 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
327 VMMDevReportGuestInfo *pReqInfo1 = NULL;
328 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
329 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
330 if (RT_SUCCESS(rc))
331 {
332 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
333 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
334 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
335 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
336 pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
337 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
338
339 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
340 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
341 if (RT_SUCCESS(rc))
342 {
343 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
344 pReqInfo1->guestInfo.osType = enmOSType;
345
346 /*
347 * There are two protocols here:
348 * 1. Info2 + Info1. Supported by >=3.2.51.
349 * 2. Info1 and optionally Info2. The old protocol.
350 *
351 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
352 * if not supported by the VMMDev (message ordering requirement).
353 */
354 rc = VbglR0GRPerform(&pReqInfo2->header);
355 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
356 if (RT_SUCCESS(rc))
357 {
358 rc = VbglR0GRPerform(&pReqInfo1->header);
359 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
360 }
361 else if ( rc == VERR_NOT_SUPPORTED
362 || rc == VERR_NOT_IMPLEMENTED)
363 {
364 rc = VbglR0GRPerform(&pReqInfo1->header);
365 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
366 if (RT_SUCCESS(rc))
367 {
368 rc = VbglR0GRPerform(&pReqInfo2->header);
369 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
370 if (rc == VERR_NOT_IMPLEMENTED)
371 rc = VINF_SUCCESS;
372 }
373 }
374 VbglR0GRFree(&pReqInfo1->header);
375 }
376 VbglR0GRFree(&pReqInfo2->header);
377 }
378
379 return rc;
380}
381
382
383/**
384 * Report the guest driver status to the host.
385 *
386 * @returns IPRT status code.
387 * @param fActive Flag whether the driver is now active or not.
388 */
389static int vgdrvReportDriverStatus(bool fActive)
390{
391 /*
392 * Report guest status of the VBox driver to the host.
393 */
394 VMMDevReportGuestStatus *pReq2 = NULL;
395 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
396 Log(("vgdrvReportDriverStatus: VbglR0GRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
397 if (RT_SUCCESS(rc))
398 {
399 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
400 pReq2->guestStatus.status = fActive ?
401 VBoxGuestFacilityStatus_Active
402 : VBoxGuestFacilityStatus_Inactive;
403 pReq2->guestStatus.flags = 0;
404 rc = VbglR0GRPerform(&pReq2->header);
405 Log(("vgdrvReportDriverStatus: VbglR0GRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
406 fActive ? 1 : 0, rc));
407 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
408 rc = VINF_SUCCESS;
409 VbglR0GRFree(&pReq2->header);
410 }
411
412 return rc;
413}
414
415
416/** @name Memory Ballooning
417 * @{
418 */
419
420/**
421 * Inflate the balloon by one chunk represented by an R0 memory object.
422 *
423 * The caller owns the balloon mutex.
424 *
425 * @returns IPRT status code.
426 * @param pMemObj Pointer to the R0 memory object.
427 * @param pReq The pre-allocated request for performing the VMMDev call.
428 */
429static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
430{
431 uint32_t iPage;
432 int rc;
433
434 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
435 {
436 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
437 pReq->aPhysPage[iPage] = phys;
438 }
439
440 pReq->fInflate = true;
441 pReq->header.size = g_cbChangeMemBalloonReq;
442 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
443
444 rc = VbglR0GRPerform(&pReq->header);
445 if (RT_FAILURE(rc))
446 LogRel(("vgdrvBalloonInflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
447 return rc;
448}
449
450
451/**
452 * Deflate the balloon by one chunk - info the host and free the memory object.
453 *
454 * The caller owns the balloon mutex.
455 *
456 * @returns IPRT status code.
457 * @param pMemObj Pointer to the R0 memory object.
458 * The memory object will be freed afterwards.
459 * @param pReq The pre-allocated request for performing the VMMDev call.
460 */
461static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
462{
463 uint32_t iPage;
464 int rc;
465
466 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
467 {
468 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
469 pReq->aPhysPage[iPage] = phys;
470 }
471
472 pReq->fInflate = false;
473 pReq->header.size = g_cbChangeMemBalloonReq;
474 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
475
476 rc = VbglR0GRPerform(&pReq->header);
477 if (RT_FAILURE(rc))
478 {
479 LogRel(("vgdrvBalloonDeflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
480 return rc;
481 }
482
483 rc = RTR0MemObjFree(*pMemObj, true);
484 if (RT_FAILURE(rc))
485 {
486 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
487 return rc;
488 }
489
490 *pMemObj = NIL_RTR0MEMOBJ;
491 return VINF_SUCCESS;
492}
493
494
495/**
496 * Inflate/deflate the memory balloon and notify the host.
497 *
498 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
499 *
500 * @returns VBox status code.
501 * @param pDevExt The device extension.
502 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
503 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
504 * (VINF_SUCCESS if set).
505 */
506static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, bool *pfHandleInR3)
507{
508 int rc = VINF_SUCCESS;
509
510 if (pDevExt->MemBalloon.fUseKernelAPI)
511 {
512 VMMDevChangeMemBalloon *pReq;
513 uint32_t i;
514
515 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
516 {
517 LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
518 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
519 return VERR_INVALID_PARAMETER;
520 }
521
522 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
523 return VINF_SUCCESS; /* nothing to do */
524
525 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
526 && !pDevExt->MemBalloon.paMemObj)
527 {
528 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
529 if (!pDevExt->MemBalloon.paMemObj)
530 {
531 LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
532 return VERR_NO_MEMORY;
533 }
534 }
535
536 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
537 if (RT_FAILURE(rc))
538 return rc;
539
540 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
541 {
542 /* inflate */
543 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
544 {
545 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
546 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
547 if (RT_FAILURE(rc))
548 {
549 if (rc == VERR_NOT_SUPPORTED)
550 {
551 /* not supported -- fall back to the R3-allocated memory. */
552 rc = VINF_SUCCESS;
553 pDevExt->MemBalloon.fUseKernelAPI = false;
554 Assert(pDevExt->MemBalloon.cChunks == 0);
555 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
556 }
557 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
558 * cannot allocate more memory => don't try further, just stop here */
559 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
560 break;
561 }
562
563 rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
564 if (RT_FAILURE(rc))
565 {
566 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
567 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
568 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
569 break;
570 }
571 pDevExt->MemBalloon.cChunks++;
572 }
573 }
574 else
575 {
576 /* deflate */
577 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
578 {
579 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
580 if (RT_FAILURE(rc))
581 {
582 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
583 break;
584 }
585 pDevExt->MemBalloon.cChunks--;
586 }
587 }
588
589 VbglR0GRFree(&pReq->header);
590 }
591
592 /*
593 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
594 * the balloon changes via the other API.
595 */
596 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
597
598 return rc;
599}
600
601
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pvChunk     The address of the chunk to add to / remove from the
 *                      balloon. (user space address)
 * @param   fInflate    Inflate if true, deflate if false.
 */
static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, RTR3PTR pvChunk, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    PRTR0MEMOBJ pMemObj = NULL;
    int rc = VINF_SUCCESS;
    uint32_t i;
    RT_NOREF1(pSession);

    if (fInflate)
    {
        /* Reject inflation when the balloon is already full or the maximum
           size has never been queried (cMaxChunks == 0). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vgdrvSetBalloonSizeFromUser: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        if (!pDevExt->MemBalloon.paMemObj)
        {
            /* First inflation: lazily allocate the array tracking one R0
               memory object per possible chunk. */
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vgdrvSetBalloonSizeFromUser: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * While scanning, also remember the first free slot for an inflation.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == pvChunk)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the user pages in place before handing them to the host. */
        rc = RTR0MemObjLockUser(pMemObj, pvChunk, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vgdrvBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Host refused the chunk: unlock/release it again. */
                Log(("vgdrvSetBalloonSizeFromUser(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vgdrvBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vgdrvSetBalloonSizeFromUser(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglR0GRFree(&pReq->header);
    return rc;
}
718
719
720/**
721 * Cleanup the memory balloon of a session.
722 *
723 * Will request the balloon mutex, so it must be valid and the caller must not
724 * own it already.
725 *
726 * @param pDevExt The device extension.
727 * @param pSession The session. Can be NULL at unload.
728 */
729static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
730{
731 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
732 if ( pDevExt->MemBalloon.pOwner == pSession
733 || pSession == NULL /*unload*/)
734 {
735 if (pDevExt->MemBalloon.paMemObj)
736 {
737 VMMDevChangeMemBalloon *pReq;
738 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
739 if (RT_SUCCESS(rc))
740 {
741 uint32_t i;
742 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
743 {
744 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
745 if (RT_FAILURE(rc))
746 {
747 LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
748 rc, pDevExt->MemBalloon.cChunks));
749 break;
750 }
751 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
752 pDevExt->MemBalloon.cChunks--;
753 }
754 VbglR0GRFree(&pReq->header);
755 }
756 else
757 LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
758 rc, pDevExt->MemBalloon.cChunks));
759 RTMemFree(pDevExt->MemBalloon.paMemObj);
760 pDevExt->MemBalloon.paMemObj = NULL;
761 }
762
763 pDevExt->MemBalloon.pOwner = NULL;
764 }
765 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
766}
767
768/** @} */
769
770
771
772/** @name Heartbeat
773 * @{
774 */
775
776/**
777 * Sends heartbeat to host.
778 *
779 * @returns VBox status code.
780 */
781static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
782{
783 int rc;
784 if (pDevExt->pReqGuestHeartbeat)
785 {
786 rc = VbglR0GRPerform(pDevExt->pReqGuestHeartbeat);
787 Log3(("vgdrvHeartbeatSend: VbglR0GRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
788 }
789 else
790 rc = VERR_INVALID_STATE;
791 return rc;
792}
793
794
795/**
796 * Callback for heartbeat timer.
797 */
798static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
799{
800 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
801 int rc;
802 AssertReturnVoid(pDevExt);
803
804 rc = vgdrvHeartbeatSend(pDevExt);
805 if (RT_FAILURE(rc))
806 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
807
808 NOREF(hTimer); NOREF(iTick);
809}
810
811
812/**
813 * Configure the host to check guest's heartbeat
814 * and get heartbeat interval from the host.
815 *
816 * @returns VBox status code.
817 * @param pDevExt The device extension.
818 * @param fEnabled Set true to enable guest heartbeat checks on host.
819 */
820static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
821{
822 VMMDevReqHeartbeat *pReq;
823 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
824 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
825 if (RT_SUCCESS(rc))
826 {
827 pReq->fEnabled = fEnabled;
828 pReq->cNsInterval = 0;
829 rc = VbglR0GRPerform(&pReq->header);
830 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
831 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
832 VbglR0GRFree(&pReq->header);
833 }
834 return rc;
835}
836
837
/**
 * Initializes the heartbeat timer.
 *
 * This feature may be disabled by the host.
 *
 * @returns VBox status (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Make sure that heartbeat checking is disabled.
     */
    int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
    if (RT_SUCCESS(rc))
    {
        /* Enable host-side checking; this also stores the host's desired
           interval in pDevExt->cNsHeartbeatInterval. */
        rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
        if (RT_SUCCESS(rc))
        {
            /*
             * Preallocate the request to use it from the timer callback because:
             *    1) on Windows VbglR0GRAlloc must be called at IRQL <= APC_LEVEL
             *       and the timer callback runs at DISPATCH_LEVEL;
             *    2) avoid repeated allocations.
             */
            rc = VbglR0GRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
            if (RT_SUCCESS(rc))
            {
                LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
                        pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
                                     (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;

                    /* NOTE(review): pHeartbeatTimer is not destroyed when
                       RTTimerStart fails - looks like a timer object leak;
                       confirm whether teardown happens elsewhere. */
                    LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));

                /* Failure after preallocation: release the request again. */
                VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
                pDevExt->pReqGuestHeartbeat = NULL;
            }
            else
                LogRel(("vgdrvHeartbeatInit: VbglR0GRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));

            /* Something failed after enabling host-side checking: disable it
               again so the host doesn't flag this guest as unresponsive. */
            LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
            vgdrvHeartbeatHostConfigure(pDevExt, false);
        }
        else
            LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
    }
    return rc;
}
895
896/** @} */
897
898
899/**
900 * Helper to reinit the VMMDev communication after hibernation.
901 *
902 * @returns VBox status code.
903 * @param pDevExt The device extension.
904 * @param enmOSType The OS type.
905 *
906 * @todo Call this on all platforms, not just windows.
907 */
908int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
909{
910 int rc = vgdrvReportGuestInfo(enmOSType);
911 if (RT_SUCCESS(rc))
912 {
913 rc = vgdrvReportDriverStatus(true /* Driver is active */);
914 if (RT_FAILURE(rc))
915 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
916 }
917 else
918 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
919 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
920 RT_NOREF1(pDevExt);
921 return rc;
922}
923
924
925/**
926 * Initializes the VBoxGuest device extension when the
927 * device driver is loaded.
928 *
929 * The native code locates the VMMDev on the PCI bus and retrieve
930 * the MMIO and I/O port ranges, this function will take care of
931 * mapping the MMIO memory (if present). Upon successful return
932 * the native code should set up the interrupt handler.
933 *
934 * @returns VBox status code.
935 *
936 * @param pDevExt The device extension. Allocated by the native code.
937 * @param IOPortBase The base of the I/O port range.
938 * @param pvMMIOBase The base of the MMIO memory mapping.
939 * This is optional, pass NULL if not present.
940 * @param cbMMIO The size of the MMIO memory mapping.
941 * This is optional, pass 0 if not present.
942 * @param enmOSType The guest OS type to report to the VMMDev.
943 * @param fFixedEvents Events that will be enabled upon init and no client
944 * will ever be allowed to mask.
945 */
946int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
947 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
948{
949 int rc, rc2;
950
951#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
952 /*
953 * Create the release log.
954 */
955 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
956 PRTLOGGER pRelLogger;
957 rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
958 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
959 if (RT_SUCCESS(rc))
960 RTLogRelSetDefaultInstance(pRelLogger);
961 /** @todo Add native hook for getting logger config parameters and setting
962 * them. On linux we should use the module parameter stuff... */
963#endif
964
965 /*
966 * Adjust fFixedEvents.
967 */
968#ifdef VBOX_WITH_HGCM
969 fFixedEvents |= VMMDEV_EVENT_HGCM;
970#endif
971
972 /*
973 * Initialize the data.
974 */
975 pDevExt->IOPortBase = IOPortBase;
976 pDevExt->pVMMDevMemory = NULL;
977 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
978 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
979 pDevExt->pIrqAckEvents = NULL;
980 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
981 RTListInit(&pDevExt->WaitList);
982#ifdef VBOX_WITH_HGCM
983 RTListInit(&pDevExt->HGCMWaitList);
984#endif
985#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
986 RTListInit(&pDevExt->WakeUpList);
987#endif
988 RTListInit(&pDevExt->WokenUpList);
989 RTListInit(&pDevExt->FreeList);
990 RTListInit(&pDevExt->SessionList);
991 pDevExt->cSessions = 0;
992 pDevExt->fLoggingEnabled = false;
993 pDevExt->f32PendingEvents = 0;
994 pDevExt->u32MousePosChangedSeq = 0;
995 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
996 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
997 pDevExt->MemBalloon.cChunks = 0;
998 pDevExt->MemBalloon.cMaxChunks = 0;
999 pDevExt->MemBalloon.fUseKernelAPI = true;
1000 pDevExt->MemBalloon.paMemObj = NULL;
1001 pDevExt->MemBalloon.pOwner = NULL;
1002 pDevExt->pfnMouseNotifyCallback = NULL;
1003 pDevExt->pvMouseNotifyCallbackArg = NULL;
1004 pDevExt->pReqGuestHeartbeat = NULL;
1005
1006 pDevExt->fFixedEvents = fFixedEvents;
1007 vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
1008 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
1009
1010 vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
1011 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
1012
1013 pDevExt->fAcquireModeGuestCaps = 0;
1014 pDevExt->fSetModeGuestCaps = 0;
1015 pDevExt->fAcquiredGuestCaps = 0;
1016 vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
1017 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
1018
1019 /*
1020 * If there is an MMIO region validate the version and size.
1021 */
1022 if (pvMMIOBase)
1023 {
1024 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1025 Assert(cbMMIO);
1026 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1027 && pVMMDev->u32Size >= 32
1028 && pVMMDev->u32Size <= cbMMIO)
1029 {
1030 pDevExt->pVMMDevMemory = pVMMDev;
1031 Log(("VGDrvCommonInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1032 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1033 }
1034 else /* try live without it. */
1035 LogRel(("VGDrvCommonInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1036 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1037 }
1038
1039 /*
1040 * Create the wait and session spinlocks as well as the ballooning mutex.
1041 */
1042 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1043 if (RT_SUCCESS(rc))
1044 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1045 if (RT_FAILURE(rc))
1046 {
1047 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1048 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
1049 RTSpinlockDestroy(pDevExt->EventSpinlock);
1050 return rc;
1051 }
1052
1053 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1054 if (RT_FAILURE(rc))
1055 {
1056 LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1057 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1058 RTSpinlockDestroy(pDevExt->EventSpinlock);
1059 return rc;
1060 }
1061
1062 /*
1063 * Initialize the guest library and report the guest info back to VMMDev,
1064 * set the interrupt control filter mask, and fixate the guest mappings
1065 * made by the VMM.
1066 */
1067 rc = VbglR0InitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
1068 if (RT_SUCCESS(rc))
1069 {
1070 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1071 if (RT_SUCCESS(rc))
1072 {
1073 pDevExt->PhysIrqAckEvents = VbglR0PhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
1074 Assert(pDevExt->PhysIrqAckEvents != 0);
1075
1076 rc = vgdrvReportGuestInfo(enmOSType);
1077 if (RT_SUCCESS(rc))
1078 {
1079 /*
1080 * Set the fixed event and make sure the host doesn't have any lingering
1081 * the guest capabilities or mouse status bits set.
1082 */
1083 rc = vgdrvResetEventFilterOnHost(pDevExt, pDevExt->fFixedEvents);
1084 if (RT_SUCCESS(rc))
1085 {
1086 rc = vgdrvResetCapabilitiesOnHost(pDevExt);
1087 if (RT_SUCCESS(rc))
1088 {
1089 rc = vgdrvResetMouseStatusOnHost(pDevExt);
1090 if (RT_SUCCESS(rc))
1091 {
1092 /*
1093 * Initialize stuff which may fail without requiring the driver init to fail.
1094 */
1095 vgdrvInitFixateGuestMappings(pDevExt);
1096 vgdrvHeartbeatInit(pDevExt);
1097
1098 /*
1099 * Done!
1100 */
1101 rc = vgdrvReportDriverStatus(true /* Driver is active */);
1102 if (RT_FAILURE(rc))
1103 LogRel(("VGDrvCommonInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1104
1105 LogFlowFunc(("VGDrvCommonInitDevExt: returns success\n"));
1106 return VINF_SUCCESS;
1107 }
1108 LogRel(("VGDrvCommonInitDevExt: failed to clear mouse status: rc=%Rrc\n", rc));
1109 }
1110 else
1111 LogRel(("VGDrvCommonInitDevExt: failed to clear guest capabilities: rc=%Rrc\n", rc));
1112 }
1113 else
1114 LogRel(("VGDrvCommonInitDevExt: failed to set fixed event filter: rc=%Rrc\n", rc));
1115 }
1116 else
1117 LogRel(("VGDrvCommonInitDevExt: vgdrvReportGuestInfo failed: rc=%Rrc\n", rc));
1118 VbglR0GRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1119 }
1120 else
1121 LogRel(("VGDrvCommonInitDevExt: VbglR0GRAlloc failed: rc=%Rrc\n", rc));
1122
1123 VbglR0TerminatePrimary();
1124 }
1125 else
1126 LogRel(("VGDrvCommonInitDevExt: VbglR0InitPrimary failed: rc=%Rrc\n", rc));
1127
1128 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1129 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1130 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1131
1132#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1133 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1134 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1135#endif
1136 return rc; /* (failed) */
1137}
1138
1139
1140/**
1141 * Checks if the given option can be taken to not mean 'false'.
1142 *
1143 * @returns true or false accordingly.
1144 * @param pszValue The value to consider.
1145 */
1146bool VBDrvCommonIsOptionValueTrue(const char *pszValue)
1147{
1148 if (pszValue)
1149 {
1150 char ch;
1151 while ( (ch = *pszValue) != '\0'
1152 && RT_C_IS_SPACE(ch))
1153 pszValue++;
1154
1155 return ch != '\0'
1156 && ch != 'n' /* no */
1157 && ch != 'N' /* NO */
1158 && ch != 'd' /* disabled */
1159 && ch != 'f' /* false*/
1160 && ch != 'F' /* FALSE */
1161 && ch != 'D' /* DISABLED */
1162 && ( (ch != 'o' && ch != 'O') /* off, OFF, Off */
1163 || (pszValue[1] != 'f' && pszValue[1] != 'F') )
1164 && (ch != '0' || pszValue[1] != '\0') /* '0' */
1165 ;
1166 }
1167 return false;
1168}
1169
1170
1171/**
1172 * Processes a option.
1173 *
1174 * This will let the OS specific code have a go at it too.
1175 *
1176 * @param pDevExt The device extension.
1177 * @param pszName The option name, sans prefix.
1178 * @param pszValue The option value.
1179 */
1180void VGDrvCommonProcessOption(PVBOXGUESTDEVEXT pDevExt, const char *pszName, const char *pszValue)
1181{
1182 Log(("VGDrvCommonProcessOption: pszName='%s' pszValue='%s'\n", pszName, pszValue));
1183
1184 if ( RTStrICmpAscii(pszName, "r3_log_to_host") == 0
1185 || RTStrICmpAscii(pszName, "LoggingEnabled") == 0 /*legacy*/ )
1186 pDevExt->fLoggingEnabled = VBDrvCommonIsOptionValueTrue(pszValue);
1187 else if ( RTStrNICmp(pszName, RT_STR_TUPLE("log")) == 0
1188 || RTStrNICmpAscii(pszName, RT_STR_TUPLE("dbg_log")) == 0)
1189 {
1190 bool const fLogRel = *pszName == 'd' || *pszName == 'D';
1191 const char *pszSubName = &pszName[fLogRel ? 4 + 3 : 3];
1192 if ( !*pszSubName
1193 || RTStrICmpAscii(pszSubName, "_flags") == 0
1194 || RTStrICmpAscii(pszSubName, "_dest") == 0)
1195 {
1196 PRTLOGGER pLogger = fLogRel ? RTLogRelGetDefaultInstance() : RTLogDefaultInstance();
1197 if (pLogger)
1198 {
1199 if (!*pszSubName)
1200 RTLogGroupSettings(pLogger, pszValue);
1201 else if (RTStrICmpAscii(pszSubName, "_flags"))
1202 RTLogFlags(pLogger, pszValue);
1203 else
1204 RTLogDestinations(pLogger, pszValue);
1205 }
1206 }
1207 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1208 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1209 }
1210 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1211 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1212}
1213
1214
1215/**
1216 * Read driver configuration from the host.
1217 *
1218 * This involves connecting to the guest properties service, which means that
1219 * interrupts needs to work and that the calling thread must be able to block.
1220 *
1221 * @param pDevExt The device extension.
1222 */
1223void VGDrvCommonProcessOptionsFromHost(PVBOXGUESTDEVEXT pDevExt)
1224{
1225 /*
1226 * Create a kernel session without our selves, then connect to the HGCM service.
1227 */
1228 PVBOXGUESTSESSION pSession;
1229 int rc = VGDrvCommonCreateKernelSession(pDevExt, &pSession);
1230 if (RT_SUCCESS(rc))
1231 {
1232 union
1233 {
1234 VBGLIOCHGCMCONNECT Connect;
1235 VBGLIOCHGCMDISCONNECT Disconnect;
1236 GuestPropMsgEnumProperties EnumMsg;
1237 } uBuf;
1238
1239 RT_ZERO(uBuf.Connect);
1240 VBGLREQHDR_INIT(&uBuf.Connect.Hdr, HGCM_CONNECT);
1241 uBuf.Connect.u.In.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1242 RTStrCopy(uBuf.Connect.u.In.Loc.u.host.achName, sizeof(uBuf.Connect.u.In.Loc.u.host.achName),
1243 "VBoxGuestPropSvc"); /** @todo Add a define to the header for the name. */
1244 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CONNECT, pDevExt, pSession, &uBuf.Connect.Hdr, sizeof(uBuf.Connect));
1245 if (RT_SUCCESS(rc))
1246 {
1247 static const char g_szzPattern[] = "/VirtualBox/GuestAdd/VBoxGuest/*\0";
1248 uint32_t const idClient = uBuf.Connect.u.Out.idClient;
1249 char *pszzStrings = NULL;
1250 uint32_t cbStrings;
1251
1252 /*
1253 * Enumerate all the relevant properties. We try with a 1KB buffer, but
1254 * will double it until we get what we want or go beyond 16KB.
1255 */
1256 for (cbStrings = _1K; cbStrings <= _16K; cbStrings *= 2)
1257 {
1258 pszzStrings = (char *)RTMemAllocZ(cbStrings);
1259 if (pszzStrings)
1260 {
1261 VBGL_HGCM_HDR_INIT(&uBuf.EnumMsg.hdr, idClient, GUEST_PROP_FN_ENUM_PROPS, 3);
1262
1263 uBuf.EnumMsg.patterns.type = VMMDevHGCMParmType_LinAddr;
1264 uBuf.EnumMsg.patterns.u.Pointer.size = sizeof(g_szzPattern);
1265 uBuf.EnumMsg.patterns.u.Pointer.u.linearAddr = (uintptr_t)g_szzPattern;
1266
1267 uBuf.EnumMsg.strings.type = VMMDevHGCMParmType_LinAddr;
1268 uBuf.EnumMsg.strings.u.Pointer.size = cbStrings;
1269 uBuf.EnumMsg.strings.u.Pointer.u.linearAddr = (uintptr_t)pszzStrings;
1270
1271 uBuf.EnumMsg.size.type = VMMDevHGCMParmType_32bit;
1272 uBuf.EnumMsg.size.u.value32 = 0;
1273
1274 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CALL(sizeof(uBuf.EnumMsg)), pDevExt, pSession,
1275 &uBuf.EnumMsg.hdr.Hdr, sizeof(uBuf.EnumMsg));
1276 if (RT_SUCCESS(rc))
1277 {
1278 if ( uBuf.EnumMsg.size.type == VMMDevHGCMParmType_32bit
1279 && uBuf.EnumMsg.size.u.value32 <= cbStrings
1280 && uBuf.EnumMsg.size.u.value32 > 0)
1281 cbStrings = uBuf.EnumMsg.size.u.value32;
1282 Log(("VGDrvCommonReadConfigurationFromHost: GUEST_PROP_FN_ENUM_PROPS -> %#x bytes (cbStrings=%#x)\n",
1283 uBuf.EnumMsg.size.u.value32, cbStrings));
1284 break;
1285 }
1286
1287 RTMemFree(pszzStrings);
1288 pszzStrings = NULL;
1289 }
1290 else
1291 {
1292 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to allocate %#x bytes\n", cbStrings));
1293 break;
1294 }
1295 }
1296
1297 /*
1298 * Disconnect and destroy the session.
1299 */
1300 VBGLREQHDR_INIT(&uBuf.Disconnect.Hdr, HGCM_DISCONNECT);
1301 uBuf.Disconnect.u.In.idClient = idClient;
1302 VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_DISCONNECT, pDevExt, pSession, &uBuf.Disconnect.Hdr, sizeof(uBuf.Disconnect));
1303
1304 VGDrvCommonCloseSession(pDevExt, pSession);
1305
1306 /*
1307 * Process the properties if we got any.
1308 *
1309 * The string buffer contains packed strings in groups of four - name, value,
1310 * timestamp (as a decimal string) and flags. It is terminated by four empty
1311 * strings. Layout:
1312 * Name\0Value\0Timestamp\0Flags\0
1313 */
1314 if (pszzStrings)
1315 {
1316 uint32_t off;
1317 for (off = 0; off < cbStrings; off++)
1318 {
1319 /*
1320 * Parse the four fields, checking that it's all plain ASCII w/o any control characters.
1321 */
1322 const char *apszFields[4] = { NULL, NULL, NULL, NULL };
1323 bool fValidFields = true;
1324 unsigned iField;
1325 for (iField = 0; iField < RT_ELEMENTS(apszFields); iField++)
1326 {
1327 apszFields[0] = &pszzStrings[off];
1328 while (off < cbStrings)
1329 {
1330 char ch = pszzStrings[off++];
1331 if ((unsigned)ch < 0x20U || (unsigned)ch > 0x7fU)
1332 {
1333 if (!ch)
1334 break;
1335 if (fValidFields)
1336 Log(("VGDrvCommonReadConfigurationFromHost: Invalid char %#x at %#x (field %u)\n",
1337 ch, off - 1, iField));
1338 fValidFields = false;
1339 }
1340 }
1341 }
1342 if ( off <= cbStrings
1343 && fValidFields
1344 && *apszFields[0] != '\0')
1345 {
1346 /*
1347 * Validate and convert the flags to integer, then process the option.
1348 */
1349 uint32_t fFlags = 0;
1350 rc = GuestPropValidateFlags(apszFields[3], &fFlags);
1351 if (RT_SUCCESS(rc))
1352 {
1353 if (fFlags & GUEST_PROP_F_RDONLYGUEST)
1354 {
1355 apszFields[0] += sizeof(g_szzPattern) - 2;
1356 VGDrvCommonProcessOption(pDevExt, apszFields[0], apszFields[1]);
1357 }
1358 else
1359 LogRel(("VBoxGuest: Ignoring '%s' as it does not have RDONLYGUEST set\n", apszFields[0]));
1360 }
1361 else
1362 LogRel(("VBoxGuest: Invalid flags '%s' for '%s': %Rrc\n", apszFields[2], apszFields[0], rc));
1363 }
1364 else if (off < cbStrings)
1365 {
1366 LogRel(("VBoxGuest: Malformed guest properties enum result!\n"));
1367 Log(("VBoxGuest: off=%#x cbStrings=%#x\n%.*Rhxd\n", off, cbStrings, cbStrings, pszzStrings));
1368 break;
1369 }
1370 else if (!fValidFields)
1371 LogRel(("VBoxGuest: Ignoring %.*Rhxs as it has invalid characters in one or more fields\n",
1372 (int)strlen(apszFields[0]), apszFields[0]));
1373 else
1374 break;
1375 }
1376
1377 RTMemFree(pszzStrings);
1378 }
1379 else
1380 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to enumerate '%s': %Rrc\n", g_szzPattern, rc));
1381
1382 }
1383 else
1384 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1385 }
1386 else
1387 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1388}
1389
1390
1391/**
1392 * Deletes all the items in a wait chain.
1393 * @param pList The head of the chain.
1394 */
1395static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1396{
1397 while (!RTListIsEmpty(pList))
1398 {
1399 int rc2;
1400 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1401 RTListNodeRemove(&pWait->ListNode);
1402
1403 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1404 pWait->Event = NIL_RTSEMEVENTMULTI;
1405 pWait->pSession = NULL;
1406 RTMemFree(pWait);
1407 }
1408}
1409
1410
1411/**
1412 * Destroys the VBoxGuest device extension.
1413 *
1414 * The native code should call this before the driver is loaded,
1415 * but don't call this on shutdown.
1416 *
1417 * @param pDevExt The device extension.
1418 */
1419void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1420{
1421 int rc2;
1422 Log(("VGDrvCommonDeleteDevExt:\n"));
1423 Log(("VBoxGuest: The additions driver is terminating.\n"));
1424
1425 /*
1426 * Stop and destroy HB timer and
1427 * disable host heartbeat checking.
1428 */
1429 if (pDevExt->pHeartbeatTimer)
1430 {
1431 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1432 vgdrvHeartbeatHostConfigure(pDevExt, false);
1433 }
1434
1435 VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
1436 pDevExt->pReqGuestHeartbeat = NULL;
1437
1438 /*
1439 * Clean up the bits that involves the host first.
1440 */
1441 vgdrvTermUnfixGuestMappings(pDevExt);
1442 if (!RTListIsEmpty(&pDevExt->SessionList))
1443 {
1444 LogRelFunc(("session list not empty!\n"));
1445 RTListInit(&pDevExt->SessionList);
1446 }
1447 /* Update the host flags (mouse status etc) not to reflect this session. */
1448 pDevExt->fFixedEvents = 0;
1449 vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1450 vgdrvResetCapabilitiesOnHost(pDevExt);
1451 vgdrvResetMouseStatusOnHost(pDevExt);
1452
1453 vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1454
1455 /*
1456 * Cleanup all the other resources.
1457 */
1458 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1459 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1460 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1461
1462 vgdrvDeleteWaitList(&pDevExt->WaitList);
1463#ifdef VBOX_WITH_HGCM
1464 vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
1465#endif
1466#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1467 vgdrvDeleteWaitList(&pDevExt->WakeUpList);
1468#endif
1469 vgdrvDeleteWaitList(&pDevExt->WokenUpList);
1470 vgdrvDeleteWaitList(&pDevExt->FreeList);
1471
1472 VbglR0TerminatePrimary();
1473
1474 pDevExt->pVMMDevMemory = NULL;
1475
1476 pDevExt->IOPortBase = 0;
1477 pDevExt->pIrqAckEvents = NULL;
1478
1479#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1480 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1481 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1482#endif
1483
1484}
1485
1486
1487/**
1488 * Creates a VBoxGuest user session.
1489 *
1490 * The native code calls this when a ring-3 client opens the device.
1491 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1492 *
1493 * @returns VBox status code.
1494 * @param pDevExt The device extension.
1495 * @param ppSession Where to store the session on success.
1496 */
1497int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1498{
1499 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1500 if (RT_UNLIKELY(!pSession))
1501 {
1502 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1503 return VERR_NO_MEMORY;
1504 }
1505
1506 pSession->Process = RTProcSelf();
1507 pSession->R0Process = RTR0ProcHandleSelf();
1508 pSession->pDevExt = pDevExt;
1509 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1510 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1511 pDevExt->cSessions++;
1512 RTSpinlockRelease(pDevExt->SessionSpinlock);
1513
1514 *ppSession = pSession;
1515 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1516 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1517 return VINF_SUCCESS;
1518}
1519
1520
1521/**
1522 * Creates a VBoxGuest kernel session.
1523 *
1524 * The native code calls this when a ring-0 client connects to the device.
1525 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1526 *
1527 * @returns VBox status code.
1528 * @param pDevExt The device extension.
1529 * @param ppSession Where to store the session on success.
1530 */
1531int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1532{
1533 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1534 if (RT_UNLIKELY(!pSession))
1535 {
1536 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1537 return VERR_NO_MEMORY;
1538 }
1539
1540 pSession->Process = NIL_RTPROCESS;
1541 pSession->R0Process = NIL_RTR0PROCESS;
1542 pSession->pDevExt = pDevExt;
1543 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1544 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1545 pDevExt->cSessions++;
1546 RTSpinlockRelease(pDevExt->SessionSpinlock);
1547
1548 *ppSession = pSession;
1549 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1550 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1551 return VINF_SUCCESS;
1552}
1553
1554
1555/**
1556 * Closes a VBoxGuest session.
1557 *
1558 * @param pDevExt The device extension.
1559 * @param pSession The session to close (and free).
1560 */
1561void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1562{
1563#ifdef VBOX_WITH_HGCM
1564 unsigned i;
1565#endif
1566 LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1567 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1568
1569 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1570 RTListNodeRemove(&pSession->ListNode);
1571 pDevExt->cSessions--;
1572 RTSpinlockRelease(pDevExt->SessionSpinlock);
1573 vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBGL_IOC_AGC_FLAGS_DEFAULT, true /*fSessionTermination*/);
1574 vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/,
1575 NULL /*pfSessionCaps*/, NULL /*pfGlobalCaps*/, true /*fSessionTermination*/);
1576 vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1577 vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1578
1579 vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1580
1581#ifdef VBOX_WITH_HGCM
1582 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1583 if (pSession->aHGCMClientIds[i])
1584 {
1585 uint32_t idClient = pSession->aHGCMClientIds[i];
1586 pSession->aHGCMClientIds[i] = 0;
1587 Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", idClient));
1588 VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1589 }
1590#endif
1591
1592 pSession->pDevExt = NULL;
1593 pSession->Process = NIL_RTPROCESS;
1594 pSession->R0Process = NIL_RTR0PROCESS;
1595 vgdrvCloseMemBalloon(pDevExt, pSession);
1596 RTMemFree(pSession);
1597}
1598
1599
1600/**
1601 * Allocates a wait-for-event entry.
1602 *
1603 * @returns The wait-for-event entry.
1604 * @param pDevExt The device extension.
1605 * @param pSession The session that's allocating this. Can be NULL.
1606 */
1607static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1608{
1609 /*
1610 * Allocate it one way or the other.
1611 */
1612 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1613 if (pWait)
1614 {
1615 RTSpinlockAcquire(pDevExt->EventSpinlock);
1616
1617 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1618 if (pWait)
1619 RTListNodeRemove(&pWait->ListNode);
1620
1621 RTSpinlockRelease(pDevExt->EventSpinlock);
1622 }
1623 if (!pWait)
1624 {
1625 int rc;
1626
1627 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1628 if (!pWait)
1629 {
1630 LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
1631 return NULL;
1632 }
1633
1634 rc = RTSemEventMultiCreate(&pWait->Event);
1635 if (RT_FAILURE(rc))
1636 {
1637 LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1638 RTMemFree(pWait);
1639 return NULL;
1640 }
1641
1642 pWait->ListNode.pNext = NULL;
1643 pWait->ListNode.pPrev = NULL;
1644 }
1645
1646 /*
1647 * Zero members just as an precaution.
1648 */
1649 pWait->fReqEvents = 0;
1650 pWait->fResEvents = 0;
1651#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1652 pWait->fPendingWakeUp = false;
1653 pWait->fFreeMe = false;
1654#endif
1655 pWait->pSession = pSession;
1656#ifdef VBOX_WITH_HGCM
1657 pWait->pHGCMReq = NULL;
1658#endif
1659 RTSemEventMultiReset(pWait->Event);
1660 return pWait;
1661}
1662
1663
1664/**
1665 * Frees the wait-for-event entry.
1666 *
1667 * The caller must own the wait spinlock !
1668 * The entry must be in a list!
1669 *
1670 * @param pDevExt The device extension.
1671 * @param pWait The wait-for-event entry to free.
1672 */
1673static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1674{
1675 pWait->fReqEvents = 0;
1676 pWait->fResEvents = 0;
1677#ifdef VBOX_WITH_HGCM
1678 pWait->pHGCMReq = NULL;
1679#endif
1680#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1681 Assert(!pWait->fFreeMe);
1682 if (pWait->fPendingWakeUp)
1683 pWait->fFreeMe = true;
1684 else
1685#endif
1686 {
1687 RTListNodeRemove(&pWait->ListNode);
1688 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1689 }
1690}
1691
1692
1693/**
1694 * Frees the wait-for-event entry.
1695 *
1696 * @param pDevExt The device extension.
1697 * @param pWait The wait-for-event entry to free.
1698 */
1699static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1700{
1701 RTSpinlockAcquire(pDevExt->EventSpinlock);
1702 vgdrvWaitFreeLocked(pDevExt, pWait);
1703 RTSpinlockRelease(pDevExt->EventSpinlock);
1704}
1705
1706
1707#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1708/**
1709 * Processes the wake-up list.
1710 *
1711 * All entries in the wake-up list gets signalled and moved to the woken-up
1712 * list.
1713 * At least on Windows this function can be invoked concurrently from
1714 * different VCPUs. So, be thread-safe.
1715 *
1716 * @param pDevExt The device extension.
1717 */
1718void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1719{
1720 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1721 {
1722 RTSpinlockAcquire(pDevExt->EventSpinlock);
1723 for (;;)
1724 {
1725 int rc;
1726 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1727 if (!pWait)
1728 break;
1729 /* Prevent other threads from accessing pWait when spinlock is released. */
1730 RTListNodeRemove(&pWait->ListNode);
1731
1732 pWait->fPendingWakeUp = true;
1733 RTSpinlockRelease(pDevExt->EventSpinlock);
1734
1735 rc = RTSemEventMultiSignal(pWait->Event);
1736 AssertRC(rc);
1737
1738 RTSpinlockAcquire(pDevExt->EventSpinlock);
1739 Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
1740 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1741 pWait->fPendingWakeUp = false;
1742 if (RT_LIKELY(!pWait->fFreeMe))
1743 { /* likely */ }
1744 else
1745 {
1746 pWait->fFreeMe = false;
1747 vgdrvWaitFreeLocked(pDevExt, pWait);
1748 }
1749 }
1750 RTSpinlockRelease(pDevExt->EventSpinlock);
1751 }
1752}
1753#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1754
1755
1756/**
1757 * Implements the fast (no input or output) type of IOCtls.
1758 *
1759 * This is currently just a placeholder stub inherited from the support driver code.
1760 *
1761 * @returns VBox status code.
1762 * @param iFunction The IOCtl function number.
1763 * @param pDevExt The device extension.
1764 * @param pSession The session.
1765 */
1766int VGDrvCommonIoCtlFast(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1767{
1768 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1769
1770 NOREF(iFunction);
1771 NOREF(pDevExt);
1772 NOREF(pSession);
1773 return VERR_NOT_SUPPORTED;
1774}
1775
1776
1777/**
1778 * Gets the driver I/O control interface version, maybe adjusting it for
1779 * backwards compatibility.
1780 *
1781 * The adjusting is currently not implemented as we only have one major I/O
1782 * control interface version out there to support. This is something we will
1783 * implement as needed.
1784 *
1785 * returns IPRT status code.
1786 * @param pDevExt The device extension.
1787 * @param pSession The session.
1788 * @param pReq The request info.
1789 */
1790static int vgdrvIoCtl_DriverVersionInfo(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCDRIVERVERSIONINFO pReq)
1791{
1792 int rc;
1793 LogFlow(("VBGL_IOCTL_DRIVER_VERSION_INFO: uReqVersion=%#x uMinVersion=%#x uReserved1=%#x uReserved2=%#x\n",
1794 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved1, pReq->u.In.uReserved2));
1795 RT_NOREF2(pDevExt, pSession);
1796
1797 /*
1798 * Input validation.
1799 */
1800 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1801 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1802 {
1803 /*
1804 * Match the version.
1805 * The current logic is very simple, match the major interface version.
1806 */
1807 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1808 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1809 rc = VINF_SUCCESS;
1810 else
1811 {
1812 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1813 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1814 rc = VERR_VERSION_MISMATCH;
1815 }
1816 }
1817 else
1818 {
1819 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1820 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1821 rc = VERR_INVALID_PARAMETER;
1822 }
1823
1824 pReq->u.Out.uSessionVersion = RT_SUCCESS(rc) ? VBGL_IOC_VERSION : UINT32_MAX;
1825 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1826 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1827 pReq->u.Out.uReserved1 = 0;
1828 pReq->u.Out.uReserved2 = 0;
1829 return rc;
1830}
1831
1832
1833/**
1834 * Similar to vgdrvIoCtl_DriverVersionInfo, except its for IDC.
1835 *
1836 * returns IPRT status code.
1837 * @param pDevExt The device extension.
1838 * @param pSession The session.
1839 * @param pReq The request info.
1840 */
1841static int vgdrvIoCtl_IdcConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCCONNECT pReq)
1842{
1843 int rc;
1844 LogFlow(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x uReqVersion=%#x uMinVersion=%#x uReserved=%#x\n",
1845 pReq->u.In.u32MagicCookie, pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved));
1846 Assert(pSession != NULL);
1847 RT_NOREF(pDevExt);
1848
1849 /*
1850 * Input validation.
1851 */
1852 if (pReq->u.In.u32MagicCookie == VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE)
1853 {
1854 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1855 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1856 {
1857 /*
1858 * Match the version.
1859 * The current logic is very simple, match the major interface version.
1860 */
1861 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1862 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1863 {
1864 pReq->u.Out.pvSession = pSession;
1865 pReq->u.Out.uSessionVersion = VBGL_IOC_VERSION;
1866 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1867 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1868 pReq->u.Out.uReserved1 = 0;
1869 pReq->u.Out.pvReserved2 = NULL;
1870 return VINF_SUCCESS;
1871
1872 }
1873 LogRel(("VBGL_IOCTL_IDC_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1874 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1875 rc = VERR_VERSION_MISMATCH;
1876 }
1877 else
1878 {
1879 LogRel(("VBGL_IOCTL_IDC_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1880 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1881 rc = VERR_INVALID_PARAMETER;
1882 }
1883
1884 pReq->u.Out.pvSession = NULL;
1885 pReq->u.Out.uSessionVersion = UINT32_MAX;
1886 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1887 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1888 pReq->u.Out.uReserved1 = 0;
1889 pReq->u.Out.pvReserved2 = NULL;
1890 }
1891 else
1892 {
1893 LogRel(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1894 pReq->u.In.u32MagicCookie, VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE));
1895 rc = VERR_INVALID_PARAMETER;
1896 }
1897 return rc;
1898}
1899
1900
1901/**
1902 * Counterpart to vgdrvIoCtl_IdcConnect, destroys the session.
1903 *
1904 * returns IPRT status code.
1905 * @param pDevExt The device extension.
1906 * @param pSession The session.
1907 * @param pReq The request info.
1908 */
1909static int vgdrvIoCtl_IdcDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCDISCONNECT pReq)
1910{
1911 LogFlow(("VBGL_IOCTL_IDC_DISCONNECT: pvSession=%p vs pSession=%p\n", pReq->u.In.pvSession, pSession));
1912 RT_NOREF(pDevExt);
1913 Assert(pSession != NULL);
1914
1915 if (pReq->u.In.pvSession == pSession)
1916 {
1917 VGDrvCommonCloseSession(pDevExt, pSession);
1918 return VINF_SUCCESS;
1919 }
1920 LogRel(("VBGL_IOCTL_IDC_DISCONNECT: In.pvSession=%p is not equal to pSession=%p!\n", pReq->u.In.pvSession, pSession));
1921 return VERR_INVALID_PARAMETER;
1922}
1923
1924
1925/**
1926 * Return the VMM device I/O info.
1927 *
1928 * returns IPRT status code.
1929 * @param pDevExt The device extension.
1930 * @param pInfo The request info.
1931 * @note Ring-0 only, caller checked.
1932 */
1933static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDEVIOINFO pInfo)
1934{
1935 LogFlow(("VBGL_IOCTL_GET_VMMDEV_IO_INFO\n"));
1936
1937 pInfo->u.Out.IoPort = pDevExt->IOPortBase;
1938 pInfo->u.Out.pvVmmDevMapping = pDevExt->pVMMDevMemory;
1939 pInfo->u.Out.auPadding[0] = 0;
1940#if HC_ARCH_BITS != 32
1941 pInfo->u.Out.auPadding[1] = 0;
1942 pInfo->u.Out.auPadding[2] = 0;
1943#endif
1944 return VINF_SUCCESS;
1945}
1946
1947
/**
 * Set the callback for the kernel mouse handler.
 *
 * @returns IPRT status code (VINF_SUCCESS always).
 * @param   pDevExt     The device extension.
 * @param   pNotify     The new callback information.
 */
int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
{
    LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));

#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
    /* OSes where the callback may preempt us have their own native setter. */
    VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
#else
    /* Update callback + argument atomically w.r.t. the ISR by taking the event spinlock. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pDevExt->pfnMouseNotifyCallback   = pNotify->u.In.pfnNotify;
    pDevExt->pvMouseNotifyCallbackArg = pNotify->u.In.pvUser;
    RTSpinlockRelease(pDevExt->EventSpinlock);
#endif
    return VINF_SUCCESS;
}
1969
1970
/**
 * Worker for vgdrvIoCtl_WaitForEvents: checks whether the wait condition is
 * already satisfied.
 *
 * The caller enters the spinlock, we leave it — on BOTH exit paths the
 * spinlock is released before returning.
 *
 * @returns VINF_SUCCESS if an event (or a pending cancel) was consumed and the
 *          caller can return immediately; VERR_TIMEOUT if nothing is pending.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The request packet; u.Out.fEvents is set on success.
 * @param   iEvent      Bit index of the first requested event (for logging).
 * @param   fReqEvents  The event mask the caller is waiting for.
 */
DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            PVBGLIOCWAITFOREVENTS pInfo, int iEvent, const uint32_t fReqEvents)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    /* Acquire-style events are only delivered to the session currently allowed them. */
    if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
        fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
    if (fMatches || pSession->fPendingCancelWaitEvents)
    {
        /* Consume the matched events before dropping the lock. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pInfo->u.Out.fEvents = fMatches;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        /* A pending cancel-all request is satisfied by this (possibly empty) return. */
        pSession->fPendingCancelWaitEvents = false;
        return VINF_SUCCESS;
    }

    RTSpinlockRelease(pDevExt->EventSpinlock);
    return VERR_TIMEOUT;
}
2001
2002
/**
 * Waits for one or more of the requested host events to be posted.
 *
 * Checks the pending-event mask up front, and if nothing is pending allocates
 * a wait entry, links it into the wait list and sleeps until the ISR signals
 * it, the timeout expires, or the wait is cancelled/interrupted.
 *
 * @returns VBox status code: VINF_SUCCESS with u.Out.fEvents set,
 *          VERR_TIMEOUT, VERR_INTERRUPTED, VERR_INVALID_PARAMETER,
 *          VERR_NO_MEMORY or VERR_SEM_DESTROYED (driver unloading).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The request packet (in/out union).
 * @param   fInterruptible  Whether the sleep may be interrupted by signals.
 */
static int vgdrvIoCtl_WaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    PVBGLIOCWAITFOREVENTS pInfo, bool fInterruptible)
{
    uint32_t const cMsTimeout = pInfo->u.In.cMsTimeOut;
    const uint32_t fReqEvents = pInfo->u.In.fEvents;
    uint32_t       fResEvents;
    int            iEvent;
    PVBOXGUESTWAIT pWait;
    int            rc;

    pInfo->u.Out.fEvents = 0; /* Note! This overwrites pInfo->u.In.* fields! */

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     * (vbdgCheckWaitEventCondition releases the spinlock on both outcomes.)
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* A zero timeout means poll only — don't allocate a wait entry. */
    if (!cMsTimeout)
    {
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = vgdrvWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        vgdrvWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    /* UINT32_MAX means wait forever. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     * (Note: the wait entry is intentionally not freed in this case.)
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     * fResEvents == UINT32_MAX is the cancel marker set by CancelAllWaitEvents.
     */
    if (   fResEvents
        && fResEvents != UINT32_MAX)
    {
        pInfo->u.Out.fEvents = fResEvents;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        rc = VERR_INTERRUPTED;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken with success but no events recorded — shouldn't happen. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
2112
2113
/** @todo the semantics of this IoCtl have been tightened, so that no calls to
 *        VBOXGUEST_IOCTL_WAITEVENT are allowed in a session after it has been
 *        called.  Change the code to make calls to VBOXGUEST_IOCTL_WAITEVENT
 *        made after that to return VERR_INTERRUPTED or something appropriate. */
/**
 * Cancels all pending event waits belonging to the given session.
 *
 * @returns VBox status code (VINF_SUCCESS always).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waiters shall be woken up.
 */
static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was as least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            /* UINT32_MAX in fResEvents marks the wait as cancelled to the waiter. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Signalling is deferred to VGDrvCommonWaitDoWakeUps outside the spinlock. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
2162
2163
/**
 * Checks if the VMM request is allowed in the context of the given session.
 *
 * Classifies the request type into a trust level and then checks that level
 * against the session (ring-0 IDC sessions are identified by
 * R0Process == NIL_RTR0PROCESS).
 *
 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   enmType             The request type.
 * @param   pReqHdr             The request.
 */
static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
                                       VMMDevRequestHeader const *pReqHdr)
{
    /*
     * Categorize the request being made.
     */
    /** @todo This need quite some more work! */
    enum
    {
        kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
    } enmRequired;
    RT_NOREF1(pDevExt);

    switch (enmType)
    {
        /*
         * Deny access to anything we don't know or provide specialized I/O controls for.
         */
#ifdef VBOX_WITH_HGCM
        case VMMDevReq_HGCMConnect:
        case VMMDevReq_HGCMDisconnect:
# ifdef VBOX_WITH_64_BITS_GUESTS
        case VMMDevReq_HGCMCall32:
        case VMMDevReq_HGCMCall64:
# else
        case VMMDevReq_HGCMCall:
# endif /* VBOX_WITH_64_BITS_GUESTS */
        case VMMDevReq_HGCMCancel:
        case VMMDevReq_HGCMCancel2:
#endif /* VBOX_WITH_HGCM */
        case VMMDevReq_SetGuestCapabilities:
        default:
            enmRequired = kLevel_NoOne;
            break;

        /*
         * There are a few things only this driver can do (and it doesn't use
         * the VMMRequst I/O control route anyway, but whatever).
         */
        case VMMDevReq_ReportGuestInfo:
        case VMMDevReq_ReportGuestInfo2:
        case VMMDevReq_GetHypervisorInfo:
        case VMMDevReq_SetHypervisorInfo:
        case VMMDevReq_RegisterPatchMemory:
        case VMMDevReq_DeregisterPatchMemory:
        case VMMDevReq_GetMemBalloonChangeRequest:
            enmRequired = kLevel_OnlyVBoxGuest;
            break;

        /*
         * Trusted users apps only.
         */
        case VMMDevReq_QueryCredentials:
        case VMMDevReq_ReportCredentialsJudgement:
        case VMMDevReq_RegisterSharedModule:
        case VMMDevReq_UnregisterSharedModule:
        case VMMDevReq_WriteCoreDump:
        case VMMDevReq_GetCpuHotPlugRequest:
        case VMMDevReq_SetCpuHotPlugStatus:
        case VMMDevReq_CheckSharedModules:
        case VMMDevReq_GetPageSharingStatus:
        case VMMDevReq_DebugIsPageShared:
        case VMMDevReq_ReportGuestStats:
        case VMMDevReq_ReportGuestUserState:
        case VMMDevReq_GetStatisticsChangeRequest:
        case VMMDevReq_ChangeMemBalloon:
            enmRequired = kLevel_TrustedUsers;
            break;

        /*
         * Anyone.
         */
        case VMMDevReq_GetMouseStatus:
        case VMMDevReq_SetMouseStatus:
        case VMMDevReq_SetPointerShape:
        case VMMDevReq_GetHostVersion:
        case VMMDevReq_Idle:
        case VMMDevReq_GetHostTime:
        case VMMDevReq_SetPowerStatus:
        case VMMDevReq_AcknowledgeEvents:
        case VMMDevReq_CtlGuestFilterMask:
        case VMMDevReq_ReportGuestStatus:
        case VMMDevReq_GetDisplayChangeRequest:
        case VMMDevReq_VideoModeSupported:
        case VMMDevReq_GetHeightReduction:
        case VMMDevReq_GetDisplayChangeRequest2:
        case VMMDevReq_VideoModeSupported2:
        case VMMDevReq_VideoAccelEnable:
        case VMMDevReq_VideoAccelFlush:
        case VMMDevReq_VideoSetVisibleRegion:
        case VMMDevReq_GetDisplayChangeRequestEx:
        case VMMDevReq_GetSeamlessChangeRequest:
        case VMMDevReq_GetVRDPChangeRequest:
        case VMMDevReq_LogString:
        case VMMDevReq_GetSessionId:
            enmRequired = kLevel_AllUsers;
            break;

        /*
         * Depends on the request parameters...
         */
        /** @todo this have to be changed into an I/O control and the facilities
         *        tracked in the session so they can automatically be failed when the
         *        session terminates without reporting the new status.
         *
         *        The information presented by IGuest is not reliable without this! */
        case VMMDevReq_ReportGuestCapabilities:
            switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
            {
                case VBoxGuestFacilityType_All:
                case VBoxGuestFacilityType_VBoxGuestDriver:
                    enmRequired = kLevel_OnlyVBoxGuest;
                    break;
                case VBoxGuestFacilityType_VBoxService:
                    enmRequired = kLevel_TrustedUsers;
                    break;
                case VBoxGuestFacilityType_VBoxTrayClient:
                case VBoxGuestFacilityType_Seamless:
                case VBoxGuestFacilityType_Graphics:
                default:
                    enmRequired = kLevel_AllUsers;
                    break;
            }
            break;
    }

    /*
     * Check against the session.
     * Note: kLevel_TrustedUsers is currently treated like kLevel_AllUsers here.
     */
    switch (enmRequired)
    {
        default:
        case kLevel_NoOne:
            break;
        case kLevel_OnlyVBoxGuest:
        case kLevel_OnlyKernel:
            if (pSession->R0Process == NIL_RTR0PROCESS)
                return VINF_SUCCESS;
            break;
        case kLevel_TrustedUsers:
        case kLevel_AllUsers:
            return VINF_SUCCESS;
    }

    return VERR_PERMISSION_DENIED;
}
2319
/**
 * Forwards a generic VMMDev request from ring-3 to the host.
 *
 * Validates the request, checks session permissions, copies the request into
 * the physical memory heap, performs it, and copies the result back.
 *
 * @returns VBox status code (also the VMMDev status on request failure).
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReqHdr     The request buffer (in/out).
 * @param   cbData      Size of the request buffer.
 */
static int vgdrvIoCtl_VMMDevRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    VMMDevRequestHeader *pReqHdr, size_t cbData)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGR0Verify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglR0GRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);
    Assert(pReqCopy->reserved1 == cbReq);
    pReqCopy->reserved1 = 0; /* VGDrvCommonIoCtl or caller sets cbOut, so clear it. */

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglR0GRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        pReqHdr->reserved1 = cbReq; /* preserve cbOut */
    }
    else if (RT_FAILURE(rc))
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglR0GRPerform - rc=%Rrc!\n", rc));
    else
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglR0GRFree(pReqCopy);
    return rc;
}
2404
2405
2406#ifdef VBOX_WITH_HGCM
2407
2408AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2409
/**
 * Worker for vgdrvHgcmAsyncWaitCallback*.
 *
 * Waits until the HGCM request is marked done (VBOX_HGCM_REQ_DONE) or the
 * wait fails/times out.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header to watch.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Wait timeout in milliseconds.
 */
static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                            bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pWait = vgdrvWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* Back off briefly before retrying the allocation. */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        vgdrvWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc; /* Driver is unloading; the wait entry is deliberately leaked. */

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));

    vgdrvWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2478
2479
/**
 * This is a callback for dealing with async waits (non-interruptible variant).
 *
 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
 *
 * @param   pHdr        The HGCM request header.
 * @param   pvUser      Pointer to the device extension (PVBOXGUESTDEVEXT).
 * @param   u32User     The wait timeout in milliseconds.
 */
static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
    return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
                                            false /* fInterruptible */, u32User /* cMillies */);
}
2492
2493
/**
 * This is a callback for dealing with async waits that may be interrupted by
 * signals (interruptible variant of vgdrvHgcmAsyncWaitCallback).
 *
 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
 *
 * @param   pHdr        The HGCM request header.
 * @param   pvUser      Pointer to the device extension (PVBOXGUESTDEVEXT).
 * @param   u32User     The wait timeout in milliseconds.
 */
static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
                                            true /* fInterruptible */, u32User /* cMillies */);
}
2506
2507
/**
 * Handles VBGL_IOCTL_HGCM_CONNECT: connects to an HGCM service and records the
 * client id in the session.
 *
 * @returns VBox status code; VERR_TOO_MANY_OPEN_FILES if the session's client
 *          id table is full (the fresh connection is then disconnected again).
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The request packet (in/out).
 */
static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCONNECT pInfo)
{
    int rc;
    HGCMCLIENTID idClient = 0;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
         pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->u.In.Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(&pInfo->u.In.Loc, &idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: idClient=%RX32 (rc=%Rrc)\n", idClient, rc));
    if (RT_SUCCESS(rc))
    {
        /*
         * Append the client id to the client id table.
         * If the table has somehow become filled up, we'll disconnect the session.
         */
        unsigned i;
        RTSpinlockAcquire(pDevExt->SessionSpinlock);
        for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
            if (!pSession->aHGCMClientIds[i])
            {
                pSession->aHGCMClientIds[i] = idClient;
                break;
            }
        RTSpinlockRelease(pDevExt->SessionSpinlock);
        if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
        {
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
            VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);

            pInfo->u.Out.idClient = 0;
            return VERR_TOO_MANY_OPEN_FILES;
        }
    }
    /* On connect failure idClient is still 0 here. */
    pInfo->u.Out.idClient = idClient;
    return rc;
}
2551
2552
/**
 * Handles VBGL_IOCTL_HGCM_DISCONNECT: disconnects an HGCM client owned by the
 * session.
 *
 * @returns VBox status code; VERR_INVALID_HANDLE if the client id is not
 *          registered with this session.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The request packet.
 */
static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMDISCONNECT pInfo)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     * (UINT32_MAX marks the slot as in-use-by-disconnect.)
     */
    int rc;
    const uint32_t idClient = pInfo->u.In.idClient;
    unsigned i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == idClient)
        {
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglR0HGCMInternalDisconnect call will invoke the callback if the
     * HGCM call is performed in an ASYNC fashion.  The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
    rc = VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: rc=%Rrc\n", rc));

    /* Update the client id array according to the result: free the slot on
       success, restore the id on failure. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) ? 0 : idClient;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    return rc;
}
2592
2593
/**
 * Worker for the HGCM call I/O controls: validates and performs an HGCM call.
 *
 * @returns VBox status code (on success, the call's Hdr.rc).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The call request (in/out).
 * @param   cMillies        Call timeout in milliseconds.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   f32bit          Set if the parameters use the 32-bit layout
 *                          (relevant on 64-bit hosts only).
 * @param   fUserData       Set if the buffers must be treated as user-mode
 *                          data even for a kernel session.
 * @param   cbExtra         Size of any additional data preceding the call
 *                          structure in the buffer.
 * @param   cbData          Total size of the buffer.
 */
static int vgdrvIoCtl_HGCMCallInner(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
                                    uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
                                    size_t cbExtra, size_t cbData)
{
    const uint32_t u32ClientId = pInfo->u32ClientID;
    uint32_t fFlags;
    size_t cbActual;
    unsigned i;
    int rc;

    /*
     * Some more validations.
     */
    if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
        return VERR_INVALID_PARAMETER;
    }

    /* Check that the buffer is large enough for the declared parameter count. */
    cbActual = cbExtra + sizeof(*pInfo);
#ifdef RT_ARCH_AMD64
    if (f32bit)
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
    else
#endif
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
    if (cbData < cbActual)
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
                cbData, cbData, cbActual, cbActual));
        return VERR_INVALID_PARAMETER;
    }
    pInfo->Hdr.cbOut = (uint32_t)cbActual;

    /*
     * Validate the client id.
     */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
            break;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMCall call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. This function can
     * deal with cancelled requests, so we let user more requests
     * be interruptible (should add a flag for this later I guess).
     */
    LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
    fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
    uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
#ifdef RT_ARCH_AMD64
    if (f32bit)
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    else
#endif
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    if (RT_SUCCESS(rc))
    {
        /* The transport succeeded; propagate the call's own status. */
        rc = pInfo->Hdr.rc;
        LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", rc));
    }
    else
    {
        if (   rc != VERR_INTERRUPTED
            && rc != VERR_TIMEOUT)
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
        else
            Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
    }
    return rc;
}
2682
2683
/**
 * Thin wrapper around vgdrvIoCtl_HGCMCallInner that pulls the timeout and
 * interruptibility from the request itself.
 *
 * User sessions (R0Process != NIL_RTR0PROCESS) are always interruptible.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The call request (in/out).
 * @param   f32bit      Set if the parameters use the 32-bit layout.
 * @param   fUserData   Set if the buffers must be treated as user-mode data.
 * @param   cbData      Total size of the buffer.
 */
static int vgdrvIoCtl_HGCMCallWrapper(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
                                      bool f32bit, bool fUserData, size_t cbData)
{
    return vgdrvIoCtl_HGCMCallInner(pDevExt, pSession, pInfo, pInfo->cMsTimeout,
                                    pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
                                    f32bit, fUserData, 0 /*cbExtra*/, cbData);
}
2691
2692
2693#endif /* VBOX_WITH_HGCM */
2694
/**
 * Handle VBGL_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly.  If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pInfo       The output buffer.
 */
static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHECKBALLOON pInfo)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    LogFlow(("VBGL_IOCTL_CHECK_BALLOON:\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The chunk limit is fixed once reported by the host. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->u.Out.cBalloonChunks = pReq->cBalloonChunks;
                pInfo->u.Out.fHandleInR3    = false;
                pInfo->u.Out.afPadding[0]   = false;
                pInfo->u.Out.afPadding[1]   = false;
                pInfo->u.Out.afPadding[2]   = false;

                rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->u.Out.fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;
            }
            else
                LogRel(("VBGL_IOCTL_CHECK_BALLOON: VbglR0GRPerform failed. rc=%Rrc\n", rc));
            VbglR0GRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    LogFlow(("VBGL_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
    return rc;
}
2767
2768
/**
 * Handle a request for changing the memory balloon.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pInfo       The change request structure (input).
 */
static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEBALLOON pInfo)
{
    int rc;
    LogFlow(("VBGL_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%p\n", pInfo->u.In.fInflate, pInfo->u.In.pvChunk));
    /* Reject requests with non-zero padding (strict input validation). */
    if (   pInfo->u.In.abPadding[0]
        || pInfo->u.In.abPadding[1]
        || pInfo->u.In.abPadding[2]
        || pInfo->u.In.abPadding[3]
        || pInfo->u.In.abPadding[4]
        || pInfo->u.In.abPadding[5]
        || pInfo->u.In.abPadding[6]
#if ARCH_BITS == 32
        || pInfo->u.In.abPadding[7]
        || pInfo->u.In.abPadding[8]
        || pInfo->u.In.abPadding[9]
#endif
       )
    {
        Log(("VBGL_IOCTL_CHANGE_BALLOON: Padding isn't all zero: %.*Rhxs\n", sizeof(pInfo->u.In.abPadding), pInfo->u.In.abPadding));
        return VERR_INVALID_PARAMETER;
    }

    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /* R3 chunk handling is only valid when the kernel API is not in use. */
    if (!pDevExt->MemBalloon.fUseKernelAPI)
    {
        /*
         * The first user trying to query/change the balloon becomes the
         * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
         */
        if (   pDevExt->MemBalloon.pOwner != pSession
            && pDevExt->MemBalloon.pOwner == NULL)
            pDevExt->MemBalloon.pOwner = pSession;

        if (pDevExt->MemBalloon.pOwner == pSession)
            rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u.In.pvChunk, pInfo->u.In.fInflate != false);
        else
            rc = VERR_PERMISSION_DENIED;
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    return rc;
}
2824
2825
2826/**
2827 * Handle a request for writing a core dump of the guest on the host.
2828 *
2829 * @returns VBox status code.
2830 *
2831 * @param pDevExt The device extension.
2832 * @param pInfo The output buffer.
2833 */
2834static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCWRITECOREDUMP pInfo)
2835{
2836 VMMDevReqWriteCoreDump *pReq = NULL;
2837 int rc;
2838 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
2839 RT_NOREF1(pDevExt);
2840
2841 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2842 if (RT_SUCCESS(rc))
2843 {
2844 pReq->fFlags = pInfo->u.In.fFlags;
2845 rc = VbglR0GRPerform(&pReq->header);
2846 if (RT_FAILURE(rc))
2847 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglR0GRPerform failed, rc=%Rrc!\n", rc));
2848
2849 VbglR0GRFree(&pReq->header);
2850 }
2851 else
2852 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2853 sizeof(*pReq), sizeof(*pReq), rc));
2854 return rc;
2855}
2856
2857
2858/**
2859 * Guest backdoor logging.
2860 *
2861 * @returns VBox status code.
2862 *
2863 * @param pDevExt The device extension.
2864 * @param pch The log message (need not be NULL terminated).
2865 * @param cbData Size of the buffer.
2866 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
2867 * call. True normal user, false root user.
2868 */
2869static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, bool fUserSession)
2870{
2871 if (pDevExt->fLoggingEnabled)
2872 RTLogBackdoorPrintf("%.*s", cbData, pch);
2873 else if (!fUserSession)
2874 LogRel(("%.*s", cbData, pch));
2875 else
2876 Log(("%.*s", cbData, pch));
2877 return VINF_SUCCESS;
2878}
2879
2880
2881/** @name Guest Capabilities, Mouse Status and Event Filter
2882 * @{
2883 */
2884
2885/**
2886 * Clears a bit usage tracker (init time).
2887 *
2888 * @param pTracker The tracker to clear.
2889 */
2890static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
2891{
2892 uint32_t iBit;
2893 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2894
2895 for (iBit = 0; iBit < 32; iBit++)
2896 pTracker->acPerBitUsage[iBit] = 0;
2897 pTracker->fMask = 0;
2898}
2899
2900
#ifdef VBOX_STRICT
/**
 * Checks that pTracker->fMask is correct and that the usage values are within
 * the valid range.
 *
 * Strict-build sanity check only; recomputes the summary mask from the
 * per-bit counters and asserts it matches the cached one.
 *
 * @param   pTracker            The tracker.
 * @param   cMax                Max valid usage value.
 * @param   pszWhat             Identifies the tracker in assertions.
 */
static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
{
    uint32_t fExpectedMask = 0;
    uint32_t iBit;
    AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));

    for (iBit = 0; iBit < 32; iBit++)
    {
        uint32_t const cUses = pTracker->acPerBitUsage[iBit];
        if (cUses != 0)
        {
            fExpectedMask |= RT_BIT_32(iBit);
            AssertMsg(cUses <= cMax, ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, cUses, cMax));
        }
    }

    AssertMsg(fExpectedMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fExpectedMask, pTracker->fMask));
}
#endif
2927
2928
2929/**
2930 * Applies a change to the bit usage tracker.
2931 *
2932 *
2933 * @returns true if the mask changed, false if not.
2934 * @param pTracker The bit usage tracker.
2935 * @param fChanged The bits to change.
2936 * @param fPrevious The previous value of the bits.
2937 * @param cMax The max valid usage value for assertions.
2938 * @param pszWhat Identifies the tracker in assertions.
2939 */
2940static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
2941 uint32_t cMax, const char *pszWhat)
2942{
2943 bool fGlobalChange = false;
2944 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2945
2946 while (fChanged)
2947 {
2948 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
2949 uint32_t const fBitMask = RT_BIT_32(iBit);
2950 Assert(iBit < 32); Assert(fBitMask & fChanged);
2951
2952 if (fBitMask & fPrevious)
2953 {
2954 pTracker->acPerBitUsage[iBit] -= 1;
2955 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2956 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2957 if (pTracker->acPerBitUsage[iBit] == 0)
2958 {
2959 fGlobalChange = true;
2960 pTracker->fMask &= ~fBitMask;
2961 }
2962 }
2963 else
2964 {
2965 pTracker->acPerBitUsage[iBit] += 1;
2966 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
2967 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2968 if (pTracker->acPerBitUsage[iBit] == 1)
2969 {
2970 fGlobalChange = true;
2971 pTracker->fMask |= fBitMask;
2972 }
2973 }
2974
2975 fChanged &= ~fBitMask;
2976 }
2977
2978#ifdef VBOX_STRICT
2979 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
2980#endif
2981 NOREF(pszWhat); NOREF(cMax);
2982 return fGlobalChange;
2983}
2984
2985
2986/**
2987 * Init and termination worker for resetting the (host) event filter on the host
2988 *
2989 * @returns VBox status code.
2990 * @param pDevExt The device extension.
2991 * @param fFixedEvents Fixed events (init time).
2992 */
2993static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
2994{
2995 VMMDevCtlGuestFilterMask *pReq;
2996 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2997 if (RT_SUCCESS(rc))
2998 {
2999 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
3000 pReq->u32OrMask = fFixedEvents;
3001 rc = VbglR0GRPerform(&pReq->header);
3002 if (RT_FAILURE(rc))
3003 LogRelFunc(("failed with rc=%Rrc\n", rc));
3004 VbglR0GRFree(&pReq->header);
3005 }
3006 RT_NOREF1(pDevExt);
3007 return rc;
3008}
3009
3010
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBGL_IOCTL_CHANGE_FILTER_MASK as well as to do
 * session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The events to add.
 * @param   fNotMask            The events to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevCtlGuestFilterMask *pReq;
    uint32_t                  fChanged;
    uint32_t                  fPrevious;
    int                       rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     * Note: on success rc is VINF_SUCCESS here, and that also serves as the
     * return value when no host update turns out to be necessary below.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionFilterMask: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fEventFilter;
    pSession->fEventFilter |= fOrMask;
    pSession->fEventFilter &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fEventFilter;
    LogFlow(("vgdrvSetSessionEventFilter: Session->fEventFilter: %#x -> %#x (changed %#x)\n",
             fPrevious, pSession->fEventFilter, fChanged));
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "EventFilterTracker");

        /*
         * If there are global changes, update the event filter on the host.
         * (fEventFilterHost == UINT32_MAX means the cached host state is
         * invalid and must be resent regardless.)
         */
        if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
        {
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
                if (pReq->u32OrMask == pDevExt->fEventFilterHost)
                    rc = VINF_SUCCESS; /* Host already has this exact filter; skip the round-trip. */
                else
                {
                    pDevExt->fEventFilterHost = pReq->u32OrMask;
                    pReq->u32NotMask = ~pReq->u32OrMask;
                    rc = VbglR0GRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fEventFilterHost = UINT32_MAX; /* Invalidate the cached host state. */
                        if (!fSessionTermination)
                        {
                            vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
                                                       pDevExt->cSessions, "EventFilterTracker");
                            pSession->fEventFilter = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS; /* Session cleanup without a request buffer: host update skipped. */
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3113
3114
3115/**
3116 * Handle VBGL_IOCTL_CHANGE_FILTER_MASK.
3117 *
3118 * @returns VBox status code.
3119 *
3120 * @param pDevExt The device extension.
3121 * @param pSession The session.
3122 * @param pInfo The request.
3123 */
3124static int vgdrvIoCtl_ChangeFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEFILTERMASK pInfo)
3125{
3126 LogFlow(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3127
3128 if ((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
3129 {
3130 Log(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3131 return VERR_INVALID_PARAMETER;
3132 }
3133
3134 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask, false /*fSessionTermination*/);
3135}
3136
3137
3138/**
3139 * Init and termination worker for set mouse feature status to zero on the host.
3140 *
3141 * @returns VBox status code.
3142 * @param pDevExt The device extension.
3143 */
3144static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
3145{
3146 VMMDevReqMouseStatus *pReq;
3147 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
3148 if (RT_SUCCESS(rc))
3149 {
3150 pReq->mouseFeatures = 0;
3151 pReq->pointerXPos = 0;
3152 pReq->pointerYPos = 0;
3153 rc = VbglR0GRPerform(&pReq->header);
3154 if (RT_FAILURE(rc))
3155 LogRelFunc(("failed with rc=%Rrc\n", rc));
3156 VbglR0GRFree(&pReq->header);
3157 }
3158 RT_NOREF1(pDevExt);
3159 return rc;
3160}
3161
3162
/**
 * Changes the mouse status mask for the given session.
 *
 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
 * do session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The status flags to add.
 * @param   fNotMask            The status flags to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevReqMouseStatus *pReq;
    uint32_t              fChanged;
    uint32_t              fPrevious;
    int                   rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     * Note: on success rc is VINF_SUCCESS here, and that also serves as the
     * return value when no host update turns out to be necessary below.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionMouseStatus: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fMouseStatus;
    pSession->fMouseStatus |= fOrMask;
    pSession->fMouseStatus &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fMouseStatus;
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "MouseStatusTracker");

        /*
         * If there are global changes, update the event filter on the host.
         * (fMouseStatusHost == UINT32_MAX means the cached host state is
         * invalid and must be resent regardless.)
         */
        if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
        {
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
                if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
                    rc = VINF_SUCCESS; /* Host already has this exact status; skip the round-trip. */
                else
                {
                    pDevExt->fMouseStatusHost = pReq->mouseFeatures;
                    pReq->pointerXPos = 0;
                    pReq->pointerYPos = 0;
                    rc = VbglR0GRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fMouseStatusHost = UINT32_MAX; /* Invalidate the cached host state. */
                        if (!fSessionTermination)
                        {
                            vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
                                                       pDevExt->cSessions, "MouseStatusTracker");
                            pSession->fMouseStatus = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS; /* Session cleanup without a request buffer: host update skipped. */
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3264
3265
3266/**
3267 * Sets the mouse status features for this session and updates them globally.
3268 *
3269 * @returns VBox status code.
3270 *
3271 * @param pDevExt The device extention.
3272 * @param pSession The session.
3273 * @param fFeatures New bitmap of enabled features.
3274 */
3275static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
3276{
3277 LogFlow(("VBGL_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
3278
3279 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
3280 return VERR_INVALID_PARAMETER;
3281
3282 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
3283}
3284
3285
3286/**
3287 * Return the mask of VMM device events that this session is allowed to see (wrt
3288 * to "acquire" mode guest capabilities).
3289 *
3290 * The events associated with guest capabilities in "acquire" mode will be
3291 * restricted to sessions which has acquired the respective capabilities.
3292 * If someone else tries to wait for acquired events, they won't be woken up
3293 * when the event becomes pending. Should some other thread in the session
3294 * acquire the capability while the corresponding event is pending, the waiting
3295 * thread will woken up.
3296 *
3297 * @returns Mask of events valid for the given session.
3298 * @param pDevExt The device extension.
3299 * @param pSession The session.
3300 *
3301 * @remarks Needs only be called when dispatching events in the
3302 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
3303 */
3304static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
3305{
3306 uint32_t fAcquireModeGuestCaps;
3307 uint32_t fAcquiredGuestCaps;
3308 uint32_t fAllowedEvents;
3309
3310 /*
3311 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
3312 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
3313 */
3314 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
3315 if (fAcquireModeGuestCaps == 0)
3316 return VMMDEV_EVENT_VALID_EVENT_MASK;
3317 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
3318
3319 /*
3320 * Calculate which events to allow according to the cap config and caps
3321 * acquired by the session.
3322 */
3323 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
3324 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
3325 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
3326 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
3327
3328 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3329 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
3330 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3331
3332 return fAllowedEvents;
3333}
3334
3335
3336/**
3337 * Init and termination worker for set guest capabilities to zero on the host.
3338 *
3339 * @returns VBox status code.
3340 * @param pDevExt The device extension.
3341 */
3342static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
3343{
3344 VMMDevReqGuestCapabilities2 *pReq;
3345 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3346 if (RT_SUCCESS(rc))
3347 {
3348 pReq->u32NotMask = UINT32_MAX;
3349 pReq->u32OrMask = 0;
3350 rc = VbglR0GRPerform(&pReq->header);
3351
3352 if (RT_FAILURE(rc))
3353 LogRelFunc(("failed with rc=%Rrc\n", rc));
3354 VbglR0GRFree(&pReq->header);
3355 }
3356 RT_NOREF1(pDevExt);
3357 return rc;
3358}
3359
3360
3361/**
3362 * Sets the guest capabilities to the host while holding the lock.
3363 *
3364 * This will ASSUME that we're the ones in charge of the mask, so
3365 * we'll simply clear all bits we don't set.
3366 *
3367 * @returns VBox status code.
3368 * @param pDevExt The device extension.
3369 * @param pReq The request.
3370 */
3371static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
3372{
3373 int rc;
3374
3375 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3376 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
3377 rc = VINF_SUCCESS;
3378 else
3379 {
3380 pDevExt->fGuestCapsHost = pReq->u32OrMask;
3381 pReq->u32NotMask = ~pReq->u32OrMask;
3382 rc = VbglR0GRPerform(&pReq->header);
3383 if (RT_FAILURE(rc))
3384 pDevExt->fGuestCapsHost = UINT32_MAX;
3385 }
3386
3387 return rc;
3388}
3389
3390
/**
 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
 * the given session.
 *
 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
 * to do session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The capabilities to add.
 * @param   fNotMask            The capabilities to remove.  Ignored in
 *                              VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE.
 * @param   fFlags              Confusing operation modifier.
 *                              VBGL_IOC_AGC_FLAGS_DEFAULT means to both
 *                              configure and acquire/release the capabilities.
 *                              VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE
 *                              means only configure capabilities in the
 *                              @a fOrMask capabilities for "acquire" mode.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes both the session and event spinlocks.
 */
static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags,
                                           bool fSessionTermination)
{
    uint32_t fCurrentOwnedCaps;
    uint32_t fSessionRemovedCaps;
    uint32_t fSessionAddedCaps;
    uint32_t fOtherConflictingCaps;
    VMMDevReqGuestCapabilities2 *pReq = NULL;
    int rc;


    /*
     * Validate and adjust input.
     */
    if (fOrMask & ~(  VMMDEV_GUEST_SUPPORTS_SEAMLESS
                    | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
                    | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
    {
        LogRel(("vgdrvAcquireSessionCapabilities: invalid fOrMask=%#x (pSession=%p fNotMask=%#x fFlags=%#x)\n",
                fOrMask, pSession, fNotMask, fFlags));
        return VERR_INVALID_PARAMETER;
    }

    if ((fFlags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) != 0)
    {
        LogRel(("vgdrvAcquireSessionCapabilities: invalid fFlags=%#x (pSession=%p fOrMask=%#x fNotMask=%#x)\n",
                fFlags, pSession, fOrMask, fNotMask));
        return VERR_INVALID_PARAMETER;
    }
    Assert(!fOrMask || !fSessionTermination); /* Session cleanup only ever releases capabilities. */

    /* The fNotMask no need to have all values valid, invalid ones will simply be ignored. */
    fNotMask &= ~fOrMask;

    /*
     * Preallocate a update request if we're about to do more than just configure
     * the capability mode.
     */
    if (!(fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE))
    {
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
        if (RT_SUCCESS(rc))
        { /* do nothing */ }
        else if (!fSessionTermination)
        {
            LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: VbglR0GRAlloc failure: %Rrc\n",
                    pSession, fOrMask, fNotMask, fFlags, rc));
            return rc;
        }
        else
            pReq = NULL; /* Ignore failure, we must do session cleanup. */
    }

    /*
     * Try switch the capabilities in the OR mask into "acquire" mode.
     *
     * Note! We currently ignore anyone which may already have "set" the capabilities
     *       in fOrMask.  Perhaps not the best way to handle it, but it's simple...
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);

    if (!(pDevExt->fSetModeGuestCaps & fOrMask))
        pDevExt->fAcquireModeGuestCaps |= fOrMask;
    else
    {
        /* A capability cannot be in "set" mode and "acquire" mode at the same time. */
        RTSpinlockRelease(pDevExt->EventSpinlock);

        if (pReq)
            VbglR0GRFree(&pReq->header);
        AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
        LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: calling caps acquire for set caps\n",
                pSession, fOrMask, fNotMask, fFlags));
        return VERR_INVALID_STATE;
    }

    /*
     * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
     */
    if (fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
    {
        RTSpinlockRelease(pDevExt->EventSpinlock);

        Assert(!pReq);
        Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: configured acquire caps: 0x%x\n",
             pSession, fOrMask, fNotMask, fFlags));
        return VINF_SUCCESS;
    }
    Assert(pReq || fSessionTermination);

    /*
     * Caller wants to acquire/release the capabilities too.
     *
     * Note! The mode change of the capabilities above won't be reverted on
     *       failure, this is intentional.
     */
    fCurrentOwnedCaps      = pSession->fAcquiredGuestCaps;
    fSessionRemovedCaps    = fCurrentOwnedCaps & fNotMask;
    fSessionAddedCaps      = fOrMask & ~fCurrentOwnedCaps;
    fOtherConflictingCaps  = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
    fOtherConflictingCaps &= fSessionAddedCaps;

    if (!fOtherConflictingCaps)
    {
        if (fSessionAddedCaps)
        {
            pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
            pDevExt->fAcquiredGuestCaps  |= fSessionAddedCaps;
        }

        if (fSessionRemovedCaps)
        {
            pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
            pDevExt->fAcquiredGuestCaps  &= ~fSessionRemovedCaps;
        }

        /*
         * If something changes (which is very likely), tell the host.
         */
        if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
        {
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
                if (RT_FAILURE(rc) && !fSessionTermination)
                {
                    /* Failed, roll back. */
                    if (fSessionAddedCaps)
                    {
                        pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
                        pDevExt->fAcquiredGuestCaps  &= ~fSessionAddedCaps;
                    }
                    if (fSessionRemovedCaps)
                    {
                        pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
                        pDevExt->fAcquiredGuestCaps  |= fSessionRemovedCaps;
                    }

                    RTSpinlockRelease(pDevExt->EventSpinlock);
                    LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
                    VbglR0GRFree(&pReq->header);
                    return rc;
                }
            }
        }
    }
    else
    {
        RTSpinlockRelease(pDevExt->EventSpinlock);

        /* Note: pReq is non-NULL here; a conflict requires fSessionAddedCaps != 0,
           hence fOrMask != 0, hence not session termination (see Assert above),
           and a non-termination allocation failure already returned early. */
        Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
        VbglR0GRFree(&pReq->header);
        return VERR_RESOURCE_BUSY;
    }

    RTSpinlockRelease(pDevExt->EventSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);

    /*
     * If we added a capability, check if that means some other thread in our
     * session should be unblocked because there are events pending.
     *
     * HACK ALERT! When the seamless support capability is added we generate a
     *             seamless change event so that the ring-3 client can sync with
     *             the seamless state.  Although this introduces a spurious
     *             wakeups of the ring-3 client, it solves the problem of client
     *             state inconsistency in multiuser environment (on Windows).
     */
    if (fSessionAddedCaps)
    {
        uint32_t fGenFakeEvents = 0;
        if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
            fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;

        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if (fGenFakeEvents || pDevExt->f32PendingEvents)
            vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
        RTSpinlockRelease(pDevExt->EventSpinlock);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
        VGDrvCommonWaitDoWakeUps(pDevExt);
#endif
    }

    return VINF_SUCCESS;
}
3605
3606
3607/**
3608 * Handle VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES.
3609 *
3610 * @returns VBox status code.
3611 *
3612 * @param pDevExt The device extension.
3613 * @param pSession The session.
3614 * @param pAcquire The request.
3615 */
3616static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCACQUIREGUESTCAPS pAcquire)
3617{
3618 int rc;
3619 LogFlow(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES: or=%#x not=%#x flags=%#x\n",
3620 pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask, pAcquire->u.In.fFlags));
3621
3622 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask,
3623 pAcquire->u.In.fFlags, false /*fSessionTermination*/);
3624 if (RT_FAILURE(rc))
3625 LogRel(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES failed rc=%Rrc\n", rc));
3626 return rc;
3627}
3628
3629
/**
 * Sets the guest capabilities for a session.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The capabilities to add.
 * @param   fNotMask            The capabilities to remove.
 * @param   pfSessionCaps       Where to return the guest capabilities reported
 *                              for this session.  Optional.
 * @param   pfGlobalCaps        Where to return the guest capabilities reported
 *                              for all the sessions.  Optional.
 *
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                       uint32_t fOrMask, uint32_t fNotMask, uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps,
                                       bool fSessionTermination)
{
    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     * Note: on success rc is VINF_SUCCESS here, and that also serves as the
     * return value when no host update turns out to be necessary below.
     */
    VMMDevReqGuestCapabilities2 *pReq;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        if (pfSessionCaps)
            *pfSessionCaps = UINT32_MAX;
        if (pfGlobalCaps)
            *pfGlobalCaps = UINT32_MAX;
        LogRel(("vgdrvSetSessionCapabilities: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    /*
     * Capabilities in "acquire" mode cannot be set via this API.
     * (Acquire mode is only used on windows at the time of writing.)
     */
    if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
#endif
    {
        /*
         * Apply the changes to the session mask.
         */
        uint32_t fChanged;
        uint32_t fPrevious = pSession->fCapabilities;
        pSession->fCapabilities |= fOrMask;
        pSession->fCapabilities &= ~fNotMask;

        /*
         * If anything actually changed, update the global usage counters.
         */
        fChanged = fPrevious ^ pSession->fCapabilities;
        if (fChanged)
        {
            bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
                                                            pDevExt->cSessions, "SetGuestCapsTracker");

            /*
             * If there are global changes, update the capabilities on the host.
             * (fGuestCapsHost == UINT32_MAX means the cached host state is
             * invalid and must be resent regardless.)
             */
            if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
            {
                Assert(pReq || fSessionTermination);
                if (pReq)
                {
                    rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);

                    /* On failure, roll back (unless it's session termination time). */
                    if (RT_FAILURE(rc) && !fSessionTermination)
                    {
                        vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
                                                   pDevExt->cSessions, "SetGuestCapsTracker");
                        pSession->fCapabilities = fPrevious;
                    }
                }
            }
        }
    }
#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    else
        rc = VERR_RESOURCE_BUSY;
#endif

    /* The output values are filled in even on failure paths reaching this point. */
    if (pfSessionCaps)
        *pfSessionCaps = pSession->fCapabilities;
    if (pfGlobalCaps)
        *pfGlobalCaps = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3737
3738
3739/**
3740 * Handle VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES.
3741 *
3742 * @returns VBox status code.
3743 *
3744 * @param pDevExt The device extension.
3745 * @param pSession The session.
3746 * @param pInfo The request.
3747 */
3748static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCSETGUESTCAPS pInfo)
3749{
3750 int rc;
3751 LogFlow(("VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3752
3753 if (!((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3754 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask,
3755 &pInfo->u.Out.fSessionCaps, &pInfo->u.Out.fGlobalCaps, false /*fSessionTermination*/);
3756 else
3757 rc = VERR_INVALID_PARAMETER;
3758
3759 return rc;
3760}
3761
3762/** @} */
3763
3764
/**
 * Common IOCtl for user to kernel and kernel to kernel communication.
 *
 * This function only does the basic validation and then invokes
 * worker functions that takes care of each specific function.
 *
 * @returns VBox status code.
 *
 * @param   iFunction   The requested function.
 * @param   pDevExt     The device extension.
 * @param   pSession    The client session.
 * @param   pReqHdr     Pointer to the request.  This always starts with
 *                      a request common header.
 * @param   cbReq       The max size of the request buffer.
 */
int VGDrvCommonIoCtl(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLREQHDR pReqHdr, size_t cbReq)
{
    uintptr_t const iFunctionStripped = VBGL_IOCTL_CODE_STRIPPED(iFunction);
    int rc;

    LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pReqHdr=%p cbReq=%zu\n",
             iFunction, pDevExt, pSession, pReqHdr, cbReq));

    /*
     * Define some helper macros to simplify validation.
     *
     * Note! Every REQ_CHECK_XXX macro returns straight out of this function on
     *       failure, storing the status in pReqHdr->rc as well so both the
     *       ioctl return code and the request header agree.
     */
#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
    do { \
        if (RT_LIKELY(   pReqHdr->cbIn == (cbInExpect) \
                      && (   pReqHdr->cbOut == (cbOutExpect) \
                          || ((cbInExpect) == (cbOutExpect) && pReqHdr->cbOut == 0) ) )) \
        { /* likely */ } \
        else \
        { \
            Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
                 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
            return pReqHdr->rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)

#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
    do { \
        if (RT_LIKELY(pReqHdr->cbIn == (cbInExpect))) \
        { /* likely */ } \
        else \
        { \
            Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
                 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
            return pReqHdr->rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
    do { \
        if (RT_LIKELY(   pReqHdr->cbOut == (cbOutExpect) \
                      || (pReqHdr->cbOut == 0 && pReqHdr->cbIn == (cbOutExpect)))) \
        { /* likely */ } \
        else \
        { \
            Log(( #Name ": Invalid input/output sizes. cbOut=%ld (%ld) expected %ld.\n", \
                 (long)pReqHdr->cbOut, (long)pReqHdr->cbIn, (long)(cbOutExpect))); \
            return pReqHdr->rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_EXPR(Name, expr) \
    do { \
        if (RT_LIKELY(!!(expr))) \
        { /* likely */ } \
        else \
        { \
            Log(( #Name ": %s\n", #expr)); \
            return pReqHdr->rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

#define REQ_CHECK_EXPR_FMT(expr, fmt) \
    do { \
        if (RT_LIKELY(!!(expr))) \
        { /* likely */ } \
        else \
        { \
            Log( fmt ); \
            return pReqHdr->rc = VERR_INVALID_PARAMETER; \
        } \
    } while (0)

/* Ring-3 sessions have a valid R0Process; a NIL value identifies an in-kernel
   (IDC) caller, which is the only one allowed past this check. */
#define REQ_CHECK_RING0(mnemonic) \
    do { \
        if (pSession->R0Process != NIL_RTR0PROCESS) \
        { \
            LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
                     pSession->Process, (uintptr_t)pSession->R0Process)); \
            return pReqHdr->rc = VERR_PERMISSION_DENIED; \
        } \
    } while (0)


    /*
     * Validate the request.
     */
    if (RT_LIKELY(cbReq >= sizeof(*pReqHdr)))
    { /* likely */ }
    else
    {
        Log(("VGDrvCommonIoCtl: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
        return VERR_INVALID_PARAMETER;
    }

    /* A zero cbOut is shorthand for "same as cbIn"; normalize before the
       range checks below. */
    if (pReqHdr->cbOut == 0)
        pReqHdr->cbOut = pReqHdr->cbIn;

    if (RT_LIKELY(   pReqHdr->uVersion == VBGLREQHDR_VERSION
                  && pReqHdr->cbIn >= sizeof(*pReqHdr)
                  && pReqHdr->cbIn <= cbReq
                  && pReqHdr->cbOut >= sizeof(*pReqHdr)
                  && pReqHdr->cbOut <= cbReq))
    { /* likely */ }
    else
    {
        Log(("VGDrvCommonIoCtl: Bad ioctl request header; cbIn=%#lx cbOut=%#lx version=%#lx\n",
             (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->uVersion));
        return VERR_INVALID_PARAMETER;
    }

    if (RT_LIKELY(RT_VALID_PTR(pSession)))
    { /* likely */ }
    else
    {
        Log(("VGDrvCommonIoCtl: Invalid pSession value %p (ioctl=%#x)\n", pSession, iFunction));
        return VERR_INVALID_PARAMETER;
    }


    /*
     * Deal with variably sized requests first.
     */
    rc = VINF_SUCCESS;
    if (   iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST(0))
        || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST_BIG) )
    {
        /* Raw VMMDev requests use a non-default header type and must be
           symmetric (in-size == out-size). */
        REQ_CHECK_EXPR(VBGL_IOCTL_VMMDEV_REQUEST, pReqHdr->uType != VBGLREQHDR_TYPE_DEFAULT);
        REQ_CHECK_EXPR_FMT(pReqHdr->cbIn == pReqHdr->cbOut,
                           ("VBGL_IOCTL_VMMDEV_REQUEST: cbIn=%ld != cbOut=%ld\n", (long)pReqHdr->cbIn, (long)pReqHdr->cbOut));
        pReqHdr->rc = vgdrvIoCtl_VMMDevRequest(pDevExt, pSession, (VMMDevRequestHeader *)pReqHdr, cbReq);
    }
    else if (RT_LIKELY(pReqHdr->uType == VBGLREQHDR_TYPE_DEFAULT))
    {
        if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_LOG(0)))
        {
            REQ_CHECK_SIZE_OUT(VBGL_IOCTL_LOG, VBGL_IOCTL_LOG_SIZE_OUT);
            pReqHdr->rc = vgdrvIoCtl_Log(pDevExt, &((PVBGLIOCLOG)pReqHdr)->u.In.szMsg[0], pReqHdr->cbIn - sizeof(VBGLREQHDR),
                                         pSession->fUserSession);
        }
#ifdef VBOX_WITH_HGCM
        else if (   iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL(0))
# if ARCH_BITS == 64
                 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0))
# endif
                )
        {
            REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
            REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
            pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
                                                     iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0)),
                                                     false /*fUserData*/, cbReq);
        }
        else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA(0)))
        {
            /* Only trusted ring-0 callers may pass user-space data pointers. */
            REQ_CHECK_RING0("VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA");
            REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
            REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
            pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
                                                     ARCH_BITS == 32, true /*fUserData*/, cbReq);
        }
#endif /* VBOX_WITH_HGCM */
        else
        {
            /*
             * Fixed sized requests.  Workers report their status via
             * pReqHdr->rc; rc stays VINF_SUCCESS unless the function code
             * itself is unknown.
             */
            switch (iFunction)
            {
                /*
                 * Ring-0 only:
                 */
                case VBGL_IOCTL_IDC_CONNECT:
                    /* NOTE(review): the mnemonic string below has a typo ("IOCL"),
                       log output only - left as-is here. */
                    REQ_CHECK_RING0("VBGL_IOCL_IDC_CONNECT");
                    REQ_CHECK_SIZES(VBGL_IOCTL_IDC_CONNECT);
                    pReqHdr->rc = vgdrvIoCtl_IdcConnect(pDevExt, pSession, (PVBGLIOCIDCCONNECT)pReqHdr);
                    break;

                case VBGL_IOCTL_IDC_DISCONNECT:
                    REQ_CHECK_RING0("VBGL_IOCTL_IDC_DISCONNECT");
                    REQ_CHECK_SIZES(VBGL_IOCTL_IDC_DISCONNECT);
                    pReqHdr->rc = vgdrvIoCtl_IdcDisconnect(pDevExt, pSession, (PVBGLIOCIDCDISCONNECT)pReqHdr);
                    break;

                case VBGL_IOCTL_GET_VMMDEV_IO_INFO:
                    REQ_CHECK_RING0("GET_VMMDEV_IO_INFO");
                    REQ_CHECK_SIZES(VBGL_IOCTL_GET_VMMDEV_IO_INFO);
                    pReqHdr->rc = vgdrvIoCtl_GetVMMDevIoInfo(pDevExt, (PVBGLIOCGETVMMDEVIOINFO)pReqHdr);
                    break;

                case VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
                    REQ_CHECK_RING0("SET_MOUSE_NOTIFY_CALLBACK");
                    REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK);
                    pReqHdr->rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (PVBGLIOCSETMOUSENOTIFYCALLBACK)pReqHdr);
                    break;

                /*
                 * Ring-3 only:
                 */
                case VBGL_IOCTL_DRIVER_VERSION_INFO:
                    REQ_CHECK_SIZES(VBGL_IOCTL_DRIVER_VERSION_INFO);
                    pReqHdr->rc = vgdrvIoCtl_DriverVersionInfo(pDevExt, pSession, (PVBGLIOCDRIVERVERSIONINFO)pReqHdr);
                    break;

                /*
                 * Both ring-3 and ring-0:
                 */
                case VBGL_IOCTL_WAIT_FOR_EVENTS:
                    REQ_CHECK_SIZES(VBGL_IOCTL_WAIT_FOR_EVENTS);
                    pReqHdr->rc = vgdrvIoCtl_WaitForEvents(pDevExt, pSession, (VBGLIOCWAITFOREVENTS *)pReqHdr,
                                                           pSession->R0Process != NIL_RTR0PROCESS);
                    break;

                case VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
                    REQ_CHECK_SIZES(VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS);
                    pReqHdr->rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
                    break;

                case VBGL_IOCTL_CHANGE_FILTER_MASK:
                    REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK);
                    pReqHdr->rc = vgdrvIoCtl_ChangeFilterMask(pDevExt, pSession, (PVBGLIOCCHANGEFILTERMASK)pReqHdr);
                    break;

#ifdef VBOX_WITH_HGCM
                case VBGL_IOCTL_HGCM_CONNECT:
                    REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_CONNECT);
                    pReqHdr->rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (PVBGLIOCHGCMCONNECT)pReqHdr);
                    break;

                case VBGL_IOCTL_HGCM_DISCONNECT:
                    REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_DISCONNECT);
                    pReqHdr->rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (PVBGLIOCHGCMDISCONNECT)pReqHdr);
                    break;
#endif

                case VBGL_IOCTL_CHECK_BALLOON:
                    REQ_CHECK_SIZES(VBGL_IOCTL_CHECK_BALLOON);
                    pReqHdr->rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHECKBALLOON)pReqHdr);
                    break;

                case VBGL_IOCTL_CHANGE_BALLOON:
                    REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_BALLOON);
                    pReqHdr->rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHANGEBALLOON)pReqHdr);
                    break;

                case VBGL_IOCTL_WRITE_CORE_DUMP:
                    REQ_CHECK_SIZES(VBGL_IOCTL_WRITE_CORE_DUMP);
                    pReqHdr->rc = vgdrvIoCtl_WriteCoreDump(pDevExt, (PVBGLIOCWRITECOREDUMP)pReqHdr);
                    break;

                case VBGL_IOCTL_SET_MOUSE_STATUS:
                    REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_STATUS);
                    pReqHdr->rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, ((PVBGLIOCSETMOUSESTATUS)pReqHdr)->u.In.fStatus);
                    break;

                case VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
                    REQ_CHECK_SIZES(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES);
                    pReqHdr->rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (PVBGLIOCACQUIREGUESTCAPS)pReqHdr);
                    break;

                case VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES:
                    REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES);
                    pReqHdr->rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (PVBGLIOCSETGUESTCAPS)pReqHdr);
                    break;

#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
                case VBGL_IOCTL_DPC_LATENCY_CHECKER:
                    REQ_CHECK_SIZES(VBGL_IOCTL_DPC_LATENCY_CHECKER);
                    pReqHdr->rc = VGDrvNtIOCtl_DpcLatencyChecker();
                    break;
#endif

                default:
                {
                    LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x (stripped %#x) cbReq=%#x\n",
                            iFunction, iFunctionStripped, cbReq));
                    pReqHdr->rc = rc = VERR_NOT_SUPPORTED;
                    break;
                }
            }
        }
    }
    else
    {
        Log(("VGDrvCommonIoCtl: uType=%#x, expected default (ioctl=%#x)\n", pReqHdr->uType, iFunction));
        return VERR_INVALID_PARAMETER;
    }

    LogFlow(("VGDrvCommonIoCtl: returns %Rrc (req: rc=%Rrc cbOut=%#x)\n", rc, pReqHdr->rc, pReqHdr->cbOut));
    return rc;
}
4069
4070
4071/**
4072 * Used by VGDrvCommonISR as well as the acquire guest capability code.
4073 *
4074 * @returns VINF_SUCCESS on success. On failure, ORed together
4075 * RTSemEventMultiSignal errors (completes processing despite errors).
4076 * @param pDevExt The VBoxGuest device extension.
4077 * @param fEvents The events to dispatch.
4078 */
4079static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
4080{
4081 PVBOXGUESTWAIT pWait;
4082 PVBOXGUESTWAIT pSafe;
4083 int rc = VINF_SUCCESS;
4084
4085 fEvents |= pDevExt->f32PendingEvents;
4086
4087 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
4088 {
4089 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
4090 if ( fHandledEvents != 0
4091 && !pWait->fResEvents)
4092 {
4093 /* Does this one wait on any of the events we're dispatching? We do a quick
4094 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
4095 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
4096 fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
4097 if (fHandledEvents)
4098 {
4099 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
4100 fEvents &= ~pWait->fResEvents;
4101 RTListNodeRemove(&pWait->ListNode);
4102#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
4103 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
4104#else
4105 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
4106 rc |= RTSemEventMultiSignal(pWait->Event);
4107#endif
4108 if (!fEvents)
4109 break;
4110 }
4111 }
4112 }
4113
4114 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
4115 return rc;
4116}
4117
4118
4119/**
4120 * Simply checks whether the IRQ is ours or not, does not do any interrupt
4121 * procesing.
4122 *
4123 * @returns true if it was our interrupt, false if it wasn't.
4124 * @param pDevExt The VBoxGuest device extension.
4125 */
4126bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
4127{
4128 RTSpinlockAcquire(pDevExt->EventSpinlock);
4129 bool const fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
4130 RTSpinlockRelease(pDevExt->EventSpinlock);
4131
4132 return fOurIrq;
4133}
4134
4135
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    bool fMousePositionChanged = false;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglR0GRPerform here as it may take another spinlocks.
         *
         * The pre-allocated acknowledge request is handed to the device by
         * writing its physical address to the VMMDev request port; the device
         * fills in pReq->header.rc and pReq->events in place.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;

            Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#if !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
                /* Callback may be invoked while holding the spinlock on these OSes. */
                if (pDevExt->pfnMouseNotifyCallback)
                    pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                PVBOXGUESTWAIT pWait;
                PVBOXGUESTWAIT pSafe;
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    /* Only waiters whose HGCM request the host marked done get woken. */
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        /* Accumulate signalling errors; processing continues regardless. */
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        Log3(("VGDrvCommonISR: not ours\n"));

    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Execute the mouse notification callback here if it cannot be executed while
     * holding the interrupt safe spinlock, see @bugref{8639}.
     */
#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT) && !defined(RT_OS_WINDOWS) /* (Windows does this in the Dpc callback) */
    if (   fMousePositionChanged
        && pDevExt->pfnMouseNotifyCallback)
        pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
#endif

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     *       care of it.  Same on darwin, doing it in the work loop callback.
     */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VGDrvNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
4265
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette