VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 70288

Last change on this file since 70288 was 70226, checked in by vboxsync, 7 years ago

VBoxGuest.cpp/VGDrvCommonISR: Don't assume pVMMDevMemory is valid.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 158.3 KB
Line 
1/* $Id: VBoxGuest.cpp 70226 2017-12-19 18:40:41Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/** @page pg_vbdrv VBoxGuest
28 *
29 * VBoxGuest is the device driver for VMMDev.
30 *
31 * The device driver is shipped as part of the guest additions. It has roots in
32 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
33 * specific code may apply to both drivers.
34 *
35 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
36 * The VBoxGuest.cpp source file shall not contain platform specific code,
37 * though it must occasionally do a few \#ifdef RT_OS_XXX tests to cater for
38 * platform differences. Though, in those cases, it is common that more than
39 * one platform needs special handling.
40 *
41 * On most platforms the device driver should create two device nodes, one for
42 * full (unrestricted) access to the feature set, and one which only provides a
43 * restrict set of functions. These are generally referred to as 'vboxguest'
44 * and 'vboxuser' respectively. Currently, this two device approach is only
45 * implemented on Linux!
46 *
47 */
48
49
50/*********************************************************************************************************************************
51* Header Files *
52*********************************************************************************************************************************/
53#define LOG_GROUP LOG_GROUP_DEFAULT
54#include "VBoxGuestInternal.h"
55#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
56#include <VBox/log.h>
57#include <VBox/HostServices/GuestPropertySvc.h>
58#include <iprt/ctype.h>
59#include <iprt/mem.h>
60#include <iprt/time.h>
61#include <iprt/memobj.h>
62#include <iprt/asm.h>
63#include <iprt/asm-amd64-x86.h>
64#include <iprt/string.h>
65#include <iprt/process.h>
66#include <iprt/assert.h>
67#include <iprt/param.h>
68#include <iprt/timer.h>
69#ifdef VBOX_WITH_HGCM
70# include <iprt/thread.h>
71#endif
72#include "version-generated.h"
73#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
74# include "revision-generated.h"
75#endif
76#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
77# include <iprt/rand.h>
78#endif
79
80
81/*********************************************************************************************************************************
82* Defined Constants And Macros *
83*********************************************************************************************************************************/
84#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
85
86
87/*********************************************************************************************************************************
88* Internal Functions *
89*********************************************************************************************************************************/
90#ifdef VBOX_WITH_HGCM
91static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
92#endif
93static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
94static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
95static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
96static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
97static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
98static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
99static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
100 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
101static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
102 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
103static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
104 uint32_t fOrMask, uint32_t fNoMask,
105 uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps, bool fSessionTermination);
106static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
107 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags, bool fSessionTermination);
108static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
109
110
111/*********************************************************************************************************************************
112* Global Variables *
113*********************************************************************************************************************************/
/** Size of a VMMDevChangeMemBalloon request large enough to carry the physical
 *  addresses of one full chunk's worth of pages (used for every balloon
 *  request allocation and submission in this file). */
static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL /* terminator */
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
137
138
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so use a small stack to perform this operation.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize  = 0;
    rc = VbglR0GRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglR0GRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];
        uint32_t    iTry;
        bool        fBitched = false;
        Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* Fall back to page alignment; over-reserve by 4MB so the
                   start address can still be 4MB-aligned further down. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            /*
             * If both RTR0MemObjReserveKernel calls above failed because either not supported or
             * not implemented at all at the current platform, try to map the memory object into the
             * virtual kernel space.
             */
            if (rc == VERR_NOT_SUPPORTED)
            {
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    /* Created once and reused for all remaining retries;
                       freed in the common cleanup below on failure. */
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
            if (   uAlignment == PAGE_SIZE
                && pReq->hypervisorStart & (_4M - 1))
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* NOTE(review): code mixes NIL_RTR0PTR and NIL_RTR0MEMOBJ when
                   testing hFictive; both are nil constants — confirm they are
                   interchangeable for this handle type. */
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            /* Host rejected this address: remember the object for cleanup and retry. */
            ahTries[iTry] = hObj;
        }

        /*
         * Cleanup failed attempts.
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        if (   RT_FAILURE(rc)
            && hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglR0GRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
277
278
279/**
280 * Undo what vgdrvInitFixateGuestMappings did.
281 *
282 * @param pDevExt The device extension.
283 */
284static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
285{
286 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
287 {
288 /*
289 * Tell the host that we're going to free the memory we reserved for
290 * it, the free it up. (Leak the memory if anything goes wrong here.)
291 */
292 VMMDevReqHypervisorInfo *pReq;
293 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
294 if (RT_SUCCESS(rc))
295 {
296 pReq->hypervisorStart = 0;
297 pReq->hypervisorSize = 0;
298 rc = VbglR0GRPerform(&pReq->header);
299 VbglR0GRFree(&pReq->header);
300 }
301 if (RT_SUCCESS(rc))
302 {
303 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
304 AssertRC(rc);
305 }
306 else
307 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
308
309 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
310 }
311}
312
313
314
/**
 * Report the guest information to the host.
 *
 * Sends both guest-info requests (Info2 with detailed version data, Info1
 * with interface version and OS type); the order they are performed in
 * doubles as protocol negotiation, see the comment in the body.
 *
 * @returns IPRT status code.
 * @param   enmOSType   The OS type to report.
 */
static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
{
    /*
     * Allocate and fill in the two guest info reports.
     */
    VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
    VMMDevReportGuestInfo  *pReqInfo1 = NULL;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
    Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
    if (RT_SUCCESS(rc))
    {
        pReqInfo2->guestInfo.additionsMajor    = VBOX_VERSION_MAJOR;
        pReqInfo2->guestInfo.additionsMinor    = VBOX_VERSION_MINOR;
        pReqInfo2->guestInfo.additionsBuild    = VBOX_VERSION_BUILD;
        pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
        pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
        RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);

        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
        Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
        if (RT_SUCCESS(rc))
        {
            pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
            pReqInfo1->guestInfo.osType           = enmOSType;

            /*
             * There are two protocols here:
             *      1. Info2 + Info1. Supported by >=3.2.51.
             *      2. Info1 and optionally Info2. The old protocol.
             *
             * We try protocol 1 first.  It will fail with VERR_NOT_SUPPORTED
             * if not supported by the VMMDev (message ordering requirement).
             */
            rc = VbglR0GRPerform(&pReqInfo2->header);
            Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
            if (RT_SUCCESS(rc))
            {
                rc = VbglR0GRPerform(&pReqInfo1->header);
                Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
            }
            else if (   rc == VERR_NOT_SUPPORTED
                     || rc == VERR_NOT_IMPLEMENTED)
            {
                /* Old protocol: Info1 first, then Info2 on a best-effort basis. */
                rc = VbglR0GRPerform(&pReqInfo1->header);
                Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
                if (RT_SUCCESS(rc))
                {
                    rc = VbglR0GRPerform(&pReqInfo2->header);
                    Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
                    if (rc == VERR_NOT_IMPLEMENTED)
                        rc = VINF_SUCCESS; /* Info2 is optional with old hosts. */
                }
            }
            VbglR0GRFree(&pReqInfo1->header);
        }
        VbglR0GRFree(&pReqInfo2->header);
    }

    return rc;
}
381
382
383/**
384 * Report the guest driver status to the host.
385 *
386 * @returns IPRT status code.
387 * @param fActive Flag whether the driver is now active or not.
388 */
389static int vgdrvReportDriverStatus(bool fActive)
390{
391 /*
392 * Report guest status of the VBox driver to the host.
393 */
394 VMMDevReportGuestStatus *pReq2 = NULL;
395 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
396 Log(("vgdrvReportDriverStatus: VbglR0GRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
397 if (RT_SUCCESS(rc))
398 {
399 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
400 pReq2->guestStatus.status = fActive ?
401 VBoxGuestFacilityStatus_Active
402 : VBoxGuestFacilityStatus_Inactive;
403 pReq2->guestStatus.flags = 0;
404 rc = VbglR0GRPerform(&pReq2->header);
405 Log(("vgdrvReportDriverStatus: VbglR0GRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
406 fActive ? 1 : 0, rc));
407 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
408 rc = VINF_SUCCESS;
409 VbglR0GRFree(&pReq2->header);
410 }
411
412 return rc;
413}
414
415
416/** @name Memory Ballooning
417 * @{
418 */
419
420/**
421 * Inflate the balloon by one chunk represented by an R0 memory object.
422 *
423 * The caller owns the balloon mutex.
424 *
425 * @returns IPRT status code.
426 * @param pMemObj Pointer to the R0 memory object.
427 * @param pReq The pre-allocated request for performing the VMMDev call.
428 */
429static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
430{
431 uint32_t iPage;
432 int rc;
433
434 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
435 {
436 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
437 pReq->aPhysPage[iPage] = phys;
438 }
439
440 pReq->fInflate = true;
441 pReq->header.size = g_cbChangeMemBalloonReq;
442 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
443
444 rc = VbglR0GRPerform(&pReq->header);
445 if (RT_FAILURE(rc))
446 LogRel(("vgdrvBalloonInflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
447 return rc;
448}
449
450
451/**
452 * Deflate the balloon by one chunk - info the host and free the memory object.
453 *
454 * The caller owns the balloon mutex.
455 *
456 * @returns IPRT status code.
457 * @param pMemObj Pointer to the R0 memory object.
458 * The memory object will be freed afterwards.
459 * @param pReq The pre-allocated request for performing the VMMDev call.
460 */
461static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
462{
463 uint32_t iPage;
464 int rc;
465
466 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
467 {
468 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
469 pReq->aPhysPage[iPage] = phys;
470 }
471
472 pReq->fInflate = false;
473 pReq->header.size = g_cbChangeMemBalloonReq;
474 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
475
476 rc = VbglR0GRPerform(&pReq->header);
477 if (RT_FAILURE(rc))
478 {
479 LogRel(("vgdrvBalloonDeflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
480 return rc;
481 }
482
483 rc = RTR0MemObjFree(*pMemObj, true);
484 if (RT_FAILURE(rc))
485 {
486 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
487 return rc;
488 }
489
490 *pMemObj = NIL_RTR0MEMOBJ;
491 return VINF_SUCCESS;
492}
493
494
495/**
496 * Inflate/deflate the memory balloon and notify the host.
497 *
498 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
499 *
500 * @returns VBox status code.
501 * @param pDevExt The device extension.
502 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
503 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
504 * (VINF_SUCCESS if set).
505 */
506static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, bool *pfHandleInR3)
507{
508 int rc = VINF_SUCCESS;
509
510 if (pDevExt->MemBalloon.fUseKernelAPI)
511 {
512 VMMDevChangeMemBalloon *pReq;
513 uint32_t i;
514
515 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
516 {
517 LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
518 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
519 return VERR_INVALID_PARAMETER;
520 }
521
522 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
523 return VINF_SUCCESS; /* nothing to do */
524
525 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
526 && !pDevExt->MemBalloon.paMemObj)
527 {
528 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
529 if (!pDevExt->MemBalloon.paMemObj)
530 {
531 LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
532 return VERR_NO_MEMORY;
533 }
534 }
535
536 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
537 if (RT_FAILURE(rc))
538 return rc;
539
540 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
541 {
542 /* inflate */
543 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
544 {
545 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
546 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
547 if (RT_FAILURE(rc))
548 {
549 if (rc == VERR_NOT_SUPPORTED)
550 {
551 /* not supported -- fall back to the R3-allocated memory. */
552 rc = VINF_SUCCESS;
553 pDevExt->MemBalloon.fUseKernelAPI = false;
554 Assert(pDevExt->MemBalloon.cChunks == 0);
555 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
556 }
557 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
558 * cannot allocate more memory => don't try further, just stop here */
559 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
560 break;
561 }
562
563 rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
564 if (RT_FAILURE(rc))
565 {
566 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
567 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
568 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
569 break;
570 }
571 pDevExt->MemBalloon.cChunks++;
572 }
573 }
574 else
575 {
576 /* deflate */
577 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
578 {
579 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
580 if (RT_FAILURE(rc))
581 {
582 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
583 break;
584 }
585 pDevExt->MemBalloon.cChunks--;
586 }
587 }
588
589 VbglR0GRFree(&pReq->header);
590 }
591
592 /*
593 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
594 * the balloon changes via the other API.
595 */
596 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
597
598 return rc;
599}
600
601
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pvChunk     The address of the chunk to add to / remove from the
 *                      balloon.  (user space address)
 * @param   fInflate    Inflate if true, deflate if false.
 */
static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, RTR3PTR pvChunk, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    PRTR0MEMOBJ pMemObj = NULL;
    int rc = VINF_SUCCESS;
    uint32_t i;
    RT_NOREF1(pSession);

    if (fInflate)
    {
        /* Reject inflation when the balloon is full or the maximum size was
           never queried (cMaxChunks == 0 would also make the '- 1' wrap, but
           the explicit zero check catches that case). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vgdrvSetBalloonSizeFromUser: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the per-chunk tracking array. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            /* RTMemAlloc does not zero; mark every slot as free explicitly. */
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vgdrvSetBalloonSizeFromUser: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflate this also remembers the first free slot; for deflate it must
     * locate the slot holding pvChunk.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == pvChunk)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the user chunk into memory so the host can take its pages. */
        rc = RTR0MemObjLockUser(pMemObj, pvChunk, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vgdrvBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Undo the locking on failure so the slot becomes free again. */
                Log(("vgdrvSetBalloonSizeFromUser(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vgdrvBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vgdrvSetBalloonSizeFromUser(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglR0GRFree(&pReq->header);
    return rc;
}
718
719
720/**
721 * Cleanup the memory balloon of a session.
722 *
723 * Will request the balloon mutex, so it must be valid and the caller must not
724 * own it already.
725 *
726 * @param pDevExt The device extension.
727 * @param pSession The session. Can be NULL at unload.
728 */
729static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
730{
731 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
732 if ( pDevExt->MemBalloon.pOwner == pSession
733 || pSession == NULL /*unload*/)
734 {
735 if (pDevExt->MemBalloon.paMemObj)
736 {
737 VMMDevChangeMemBalloon *pReq;
738 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
739 if (RT_SUCCESS(rc))
740 {
741 uint32_t i;
742 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
743 {
744 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
745 if (RT_FAILURE(rc))
746 {
747 LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
748 rc, pDevExt->MemBalloon.cChunks));
749 break;
750 }
751 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
752 pDevExt->MemBalloon.cChunks--;
753 }
754 VbglR0GRFree(&pReq->header);
755 }
756 else
757 LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
758 rc, pDevExt->MemBalloon.cChunks));
759 RTMemFree(pDevExt->MemBalloon.paMemObj);
760 pDevExt->MemBalloon.paMemObj = NULL;
761 }
762
763 pDevExt->MemBalloon.pOwner = NULL;
764 }
765 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
766}
767
768/** @} */
769
770
771
772/** @name Heartbeat
773 * @{
774 */
775
776/**
777 * Sends heartbeat to host.
778 *
779 * @returns VBox status code.
780 */
781static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
782{
783 int rc;
784 if (pDevExt->pReqGuestHeartbeat)
785 {
786 rc = VbglR0GRPerform(pDevExt->pReqGuestHeartbeat);
787 Log3(("vgdrvHeartbeatSend: VbglR0GRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
788 }
789 else
790 rc = VERR_INVALID_STATE;
791 return rc;
792}
793
794
795/**
796 * Callback for heartbeat timer.
797 */
798static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
799{
800 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
801 int rc;
802 AssertReturnVoid(pDevExt);
803
804 rc = vgdrvHeartbeatSend(pDevExt);
805 if (RT_FAILURE(rc))
806 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
807
808 NOREF(hTimer); NOREF(iTick);
809}
810
811
812/**
813 * Configure the host to check guest's heartbeat
814 * and get heartbeat interval from the host.
815 *
816 * @returns VBox status code.
817 * @param pDevExt The device extension.
818 * @param fEnabled Set true to enable guest heartbeat checks on host.
819 */
820static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
821{
822 VMMDevReqHeartbeat *pReq;
823 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
824 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
825 if (RT_SUCCESS(rc))
826 {
827 pReq->fEnabled = fEnabled;
828 pReq->cNsInterval = 0;
829 rc = VbglR0GRPerform(&pReq->header);
830 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
831 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
832 VbglR0GRFree(&pReq->header);
833 }
834 return rc;
835}
836
837
/**
 * Initializes the heartbeat timer.
 *
 * This feature may be disabled by the host.
 *
 * @returns VBox status (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Make sure that heartbeat checking is disabled.
     */
    int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
    if (RT_SUCCESS(rc))
    {
        /* Enable checking; this also retrieves cNsHeartbeatInterval from the host. */
        rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
        if (RT_SUCCESS(rc))
        {
            /*
             * Preallocate the request to use it from the timer callback because:
             *    1) on Windows VbglR0GRAlloc must be called at IRQL <= APC_LEVEL
             *       and the timer callback runs at DISPATCH_LEVEL;
             *    2) avoid repeated allocations.
             */
            rc = VbglR0GRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
            if (RT_SUCCESS(rc))
            {
                LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
                        pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
                                     (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;

                    LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));

                /* Unwind: release the preallocated request. */
                VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
                pDevExt->pReqGuestHeartbeat = NULL;
            }
            else
                LogRel(("vgdrvHeartbeatInit: VbglR0GRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));

            /* Unwind: tell the host to stop expecting heartbeats (best effort). */
            LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
            vgdrvHeartbeatHostConfigure(pDevExt, false);
        }
        else
            LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
    }
    return rc;
}
895
896/** @} */
897
898
899/**
900 * Helper to reinit the VMMDev communication after hibernation.
901 *
902 * @returns VBox status code.
903 * @param pDevExt The device extension.
904 * @param enmOSType The OS type.
905 *
906 * @todo Call this on all platforms, not just windows.
907 */
908int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
909{
910 int rc = vgdrvReportGuestInfo(enmOSType);
911 if (RT_SUCCESS(rc))
912 {
913 rc = vgdrvReportDriverStatus(true /* Driver is active */);
914 if (RT_FAILURE(rc))
915 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
916 }
917 else
918 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
919 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
920 RT_NOREF1(pDevExt);
921 return rc;
922}
923
924
925/**
926 * Initializes the release logger (debug is implicit), if configured.
927 *
928 * @returns IPRT status code.
929 */
930int VGDrvCommonInitLoggers(void)
931{
932#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
933 /*
934 * Create the release log.
935 */
936 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
937 PRTLOGGER pRelLogger;
938 int rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
939 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
940 if (RT_SUCCESS(rc))
941 RTLogRelSetDefaultInstance(pRelLogger);
942 /** @todo Add native hook for getting logger config parameters and setting
943 * them. On linux we should use the module parameter stuff... */
944 return rc;
945#else
946 return VINF_SUCCESS;
947#endif
948}
949
950
951/**
952 * Destroys the loggers.
953 */
954void VGDrvCommonDestroyLoggers(void)
955{
956#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
957 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
958 RTLogDestroy(RTLogSetDefaultInstance(NULL));
959#endif
960}
961
962
963/**
964 * Initialize the device extension fundament.
965 *
966 * There are no device resources at this point, VGDrvCommonInitDevExtResources
967 * should be called when they are available.
968 *
969 * @returns VBox status code.
970 * @param pDevExt The device extension to init.
971 */
972int VGDrvCommonInitDevExtFundament(PVBOXGUESTDEVEXT pDevExt)
973{
974 int rc;
975 AssertMsg( pDevExt->uInitState != VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT
976 && pDevExt->uInitState != VBOXGUESTDEVEXT_INIT_STATE_RESOURCES, ("uInitState=%#x\n", pDevExt->uInitState));
977
978 /*
979 * Initialize the data.
980 */
981 pDevExt->IOPortBase = UINT16_MAX;
982 pDevExt->pVMMDevMemory = NULL;
983 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
984 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
985 pDevExt->pIrqAckEvents = NULL;
986 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
987 RTListInit(&pDevExt->WaitList);
988#ifdef VBOX_WITH_HGCM
989 RTListInit(&pDevExt->HGCMWaitList);
990#endif
991#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
992 RTListInit(&pDevExt->WakeUpList);
993#endif
994 RTListInit(&pDevExt->WokenUpList);
995 RTListInit(&pDevExt->FreeList);
996 RTListInit(&pDevExt->SessionList);
997 pDevExt->cSessions = 0;
998 pDevExt->fLoggingEnabled = false;
999 pDevExt->f32PendingEvents = 0;
1000 pDevExt->u32MousePosChangedSeq = 0;
1001 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
1002 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
1003 pDevExt->MemBalloon.cChunks = 0;
1004 pDevExt->MemBalloon.cMaxChunks = 0;
1005 pDevExt->MemBalloon.fUseKernelAPI = true;
1006 pDevExt->MemBalloon.paMemObj = NULL;
1007 pDevExt->MemBalloon.pOwner = NULL;
1008 pDevExt->pfnMouseNotifyCallback = NULL;
1009 pDevExt->pvMouseNotifyCallbackArg = NULL;
1010 pDevExt->pReqGuestHeartbeat = NULL;
1011
1012 pDevExt->fFixedEvents = 0;
1013 vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
1014 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
1015
1016 vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
1017 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
1018
1019 pDevExt->fAcquireModeGuestCaps = 0;
1020 pDevExt->fSetModeGuestCaps = 0;
1021 pDevExt->fAcquiredGuestCaps = 0;
1022 vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
1023 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
1024
1025 /*
1026 * Create the wait and session spinlocks as well as the ballooning mutex.
1027 */
1028 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1029 if (RT_SUCCESS(rc))
1030 {
1031 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1032 if (RT_SUCCESS(rc))
1033 {
1034 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1035 if (RT_SUCCESS(rc))
1036 {
1037 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT;
1038 return VINF_SUCCESS;
1039 }
1040
1041 LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1042 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1043 }
1044 else
1045 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1046 RTSpinlockDestroy(pDevExt->EventSpinlock);
1047 }
1048 else
1049 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1050
1051 pDevExt->uInitState = 0;
1052 return rc;
1053}
1054
1055
1056/**
1057 * Counter to VGDrvCommonInitDevExtFundament.
1058 *
1059 * @param pDevExt The device extension.
1060 */
1061void VGDrvCommonDeleteDevExtFundament(PVBOXGUESTDEVEXT pDevExt)
1062{
1063 int rc2;
1064 AssertMsgReturnVoid(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT, ("uInitState=%#x\n", pDevExt->uInitState));
1065 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_DELETED;
1066
1067 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1068 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1069 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1070}
1071
1072
1073/**
1074 * Initializes the VBoxGuest device extension resource parts.
1075 *
1076 * The native code locates the VMMDev on the PCI bus and retrieve the MMIO and
1077 * I/O port ranges, this function will take care of mapping the MMIO memory (if
1078 * present). Upon successful return the native code should set up the interrupt
1079 * handler.
1080 *
1081 * @returns VBox status code.
1082 *
1083 * @param pDevExt The device extension. Allocated by the native code.
1084 * @param IOPortBase The base of the I/O port range.
1085 * @param pvMMIOBase The base of the MMIO memory mapping.
1086 * This is optional, pass NULL if not present.
1087 * @param cbMMIO The size of the MMIO memory mapping.
1088 * This is optional, pass 0 if not present.
1089 * @param enmOSType The guest OS type to report to the VMMDev.
1090 * @param fFixedEvents Events that will be enabled upon init and no client
1091 * will ever be allowed to mask.
1092 */
1093int VGDrvCommonInitDevExtResources(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
1094 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
1095{
1096 int rc;
1097 AssertMsgReturn(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT, ("uInitState=%#x\n", pDevExt->uInitState),
1098 VERR_INVALID_STATE);
1099
1100 /*
1101 * If there is an MMIO region validate the version and size.
1102 */
1103 if (pvMMIOBase)
1104 {
1105 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1106 Assert(cbMMIO);
1107 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1108 && pVMMDev->u32Size >= 32
1109 && pVMMDev->u32Size <= cbMMIO)
1110 {
1111 pDevExt->pVMMDevMemory = pVMMDev;
1112 Log(("VGDrvCommonInitDevExtResources: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1113 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1114 }
1115 else /* try live without it. */
1116 LogRel(("VGDrvCommonInitDevExtResources: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1117 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1118 }
1119
1120 /*
1121 * Initialize the guest library and report the guest info back to VMMDev,
1122 * set the interrupt control filter mask, and fixate the guest mappings
1123 * made by the VMM.
1124 */
1125 pDevExt->IOPortBase = IOPortBase;
1126 rc = VbglR0InitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
1127 if (RT_SUCCESS(rc))
1128 {
1129 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1130 if (RT_SUCCESS(rc))
1131 {
1132 pDevExt->PhysIrqAckEvents = VbglR0PhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
1133 Assert(pDevExt->PhysIrqAckEvents != 0);
1134
1135 rc = vgdrvReportGuestInfo(enmOSType);
1136 if (RT_SUCCESS(rc))
1137 {
1138 /*
1139 * Set the fixed event and make sure the host doesn't have any lingering
1140 * the guest capabilities or mouse status bits set.
1141 */
1142#ifdef VBOX_WITH_HGCM
1143 fFixedEvents |= VMMDEV_EVENT_HGCM;
1144#endif
1145 pDevExt->fFixedEvents = fFixedEvents;
1146 rc = vgdrvResetEventFilterOnHost(pDevExt, fFixedEvents);
1147 if (RT_SUCCESS(rc))
1148 {
1149 rc = vgdrvResetCapabilitiesOnHost(pDevExt);
1150 if (RT_SUCCESS(rc))
1151 {
1152 rc = vgdrvResetMouseStatusOnHost(pDevExt);
1153 if (RT_SUCCESS(rc))
1154 {
1155 /*
1156 * Initialize stuff which may fail without requiring the driver init to fail.
1157 */
1158 vgdrvInitFixateGuestMappings(pDevExt);
1159 vgdrvHeartbeatInit(pDevExt);
1160
1161 /*
1162 * Done!
1163 */
1164 rc = vgdrvReportDriverStatus(true /* Driver is active */);
1165 if (RT_FAILURE(rc))
1166 LogRel(("VGDrvCommonInitDevExtResources: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1167
1168 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_RESOURCES;
1169 LogFlowFunc(("VGDrvCommonInitDevExtResources: returns success\n"));
1170 return VINF_SUCCESS;
1171 }
1172 LogRel(("VGDrvCommonInitDevExtResources: failed to clear mouse status: rc=%Rrc\n", rc));
1173 }
1174 else
1175 LogRel(("VGDrvCommonInitDevExtResources: failed to clear guest capabilities: rc=%Rrc\n", rc));
1176 }
1177 else
1178 LogRel(("VGDrvCommonInitDevExtResources: failed to set fixed event filter: rc=%Rrc\n", rc));
1179 pDevExt->fFixedEvents = 0;
1180 }
1181 else
1182 LogRel(("VGDrvCommonInitDevExtResources: vgdrvReportGuestInfo failed: rc=%Rrc\n", rc));
1183 VbglR0GRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1184 }
1185 else
1186 LogRel(("VGDrvCommonInitDevExtResources: VbglR0GRAlloc failed: rc=%Rrc\n", rc));
1187
1188 VbglR0TerminatePrimary();
1189 }
1190 else
1191 LogRel(("VGDrvCommonInitDevExtResources: VbglR0InitPrimary failed: rc=%Rrc\n", rc));
1192 pDevExt->IOPortBase = UINT16_MAX;
1193 return rc;
1194}
1195
1196
1197/**
1198 * Deletes all the items in a wait chain.
1199 * @param pList The head of the chain.
1200 */
1201static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1202{
1203 while (!RTListIsEmpty(pList))
1204 {
1205 int rc2;
1206 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1207 RTListNodeRemove(&pWait->ListNode);
1208
1209 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1210 pWait->Event = NIL_RTSEMEVENTMULTI;
1211 pWait->pSession = NULL;
1212 RTMemFree(pWait);
1213 }
1214}
1215
1216
1217/**
1218 * Counter to VGDrvCommonInitDevExtResources.
1219 *
1220 * @param pDevExt The device extension.
1221 */
1222void VGDrvCommonDeleteDevExtResources(PVBOXGUESTDEVEXT pDevExt)
1223{
1224 Log(("VGDrvCommonDeleteDevExtResources:\n"));
1225 AssertMsgReturnVoid(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_RESOURCES, ("uInitState=%#x\n", pDevExt->uInitState));
1226 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT;
1227
1228 /*
1229 * Stop and destroy HB timer and disable host heartbeat checking.
1230 */
1231 if (pDevExt->pHeartbeatTimer)
1232 {
1233 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1234 vgdrvHeartbeatHostConfigure(pDevExt, false);
1235 }
1236
1237 VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
1238 pDevExt->pReqGuestHeartbeat = NULL;
1239
1240 /*
1241 * Clean up the bits that involves the host first.
1242 */
1243 vgdrvTermUnfixGuestMappings(pDevExt);
1244 if (!RTListIsEmpty(&pDevExt->SessionList))
1245 {
1246 LogRelFunc(("session list not empty!\n"));
1247 RTListInit(&pDevExt->SessionList);
1248 }
1249
1250 /*
1251 * Update the host flags (mouse status etc) not to reflect this session.
1252 */
1253 pDevExt->fFixedEvents = 0;
1254 vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1255 vgdrvResetCapabilitiesOnHost(pDevExt);
1256 vgdrvResetMouseStatusOnHost(pDevExt);
1257
1258 vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1259
1260 /*
1261 * Cleanup all the other resources.
1262 */
1263 vgdrvDeleteWaitList(&pDevExt->WaitList);
1264#ifdef VBOX_WITH_HGCM
1265 vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
1266#endif
1267#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1268 vgdrvDeleteWaitList(&pDevExt->WakeUpList);
1269#endif
1270 vgdrvDeleteWaitList(&pDevExt->WokenUpList);
1271 vgdrvDeleteWaitList(&pDevExt->FreeList);
1272
1273 VbglR0TerminatePrimary();
1274
1275
1276 pDevExt->pVMMDevMemory = NULL;
1277 pDevExt->IOPortBase = 0;
1278 pDevExt->pIrqAckEvents = NULL; /* Freed by VbglR0TerminatePrimary. */
1279}
1280
1281
1282/**
1283 * Initializes the VBoxGuest device extension when the device driver is loaded.
1284 *
1285 * The native code locates the VMMDev on the PCI bus and retrieve the MMIO and
1286 * I/O port ranges, this function will take care of mapping the MMIO memory (if
1287 * present). Upon successful return the native code should set up the interrupt
1288 * handler.
1289 *
1290 * Instead of calling this method, the host specific code choose to perform a
1291 * more granular initialization using:
1292 * 1. VGDrvCommonInitLoggers
1293 * 2. VGDrvCommonInitDevExtFundament
1294 * 3. VGDrvCommonInitDevExtResources
1295 *
1296 * @returns VBox status code.
1297 *
1298 * @param pDevExt The device extension. Allocated by the native code.
1299 * @param IOPortBase The base of the I/O port range.
1300 * @param pvMMIOBase The base of the MMIO memory mapping.
1301 * This is optional, pass NULL if not present.
1302 * @param cbMMIO The size of the MMIO memory mapping.
1303 * This is optional, pass 0 if not present.
1304 * @param enmOSType The guest OS type to report to the VMMDev.
1305 * @param fFixedEvents Events that will be enabled upon init and no client
1306 * will ever be allowed to mask.
1307 */
1308int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
1309 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
1310{
1311 int rc;
1312 VGDrvCommonInitLoggers();
1313
1314 rc = VGDrvCommonInitDevExtFundament(pDevExt);
1315 if (RT_SUCCESS(rc))
1316 {
1317 rc = VGDrvCommonInitDevExtResources(pDevExt, IOPortBase, pvMMIOBase, cbMMIO, enmOSType, fFixedEvents);
1318 if (RT_SUCCESS(rc))
1319 return rc;
1320
1321 VGDrvCommonDeleteDevExtFundament(pDevExt);
1322 }
1323 VGDrvCommonDestroyLoggers();
1324 return rc; /* (failed) */
1325}
1326
1327
1328/**
1329 * Checks if the given option can be taken to not mean 'false'.
1330 *
1331 * @returns true or false accordingly.
1332 * @param pszValue The value to consider.
1333 */
1334bool VBDrvCommonIsOptionValueTrue(const char *pszValue)
1335{
1336 if (pszValue)
1337 {
1338 char ch;
1339 while ( (ch = *pszValue) != '\0'
1340 && RT_C_IS_SPACE(ch))
1341 pszValue++;
1342
1343 return ch != '\0'
1344 && ch != 'n' /* no */
1345 && ch != 'N' /* NO */
1346 && ch != 'd' /* disabled */
1347 && ch != 'f' /* false*/
1348 && ch != 'F' /* FALSE */
1349 && ch != 'D' /* DISABLED */
1350 && ( (ch != 'o' && ch != 'O') /* off, OFF, Off */
1351 || (pszValue[1] != 'f' && pszValue[1] != 'F') )
1352 && (ch != '0' || pszValue[1] != '\0') /* '0' */
1353 ;
1354 }
1355 return false;
1356}
1357
1358
1359/**
1360 * Processes a option.
1361 *
1362 * This will let the OS specific code have a go at it too.
1363 *
1364 * @param pDevExt The device extension.
1365 * @param pszName The option name, sans prefix.
1366 * @param pszValue The option value.
1367 */
1368void VGDrvCommonProcessOption(PVBOXGUESTDEVEXT pDevExt, const char *pszName, const char *pszValue)
1369{
1370 Log(("VGDrvCommonProcessOption: pszName='%s' pszValue='%s'\n", pszName, pszValue));
1371
1372 if ( RTStrICmpAscii(pszName, "r3_log_to_host") == 0
1373 || RTStrICmpAscii(pszName, "LoggingEnabled") == 0 /*legacy*/ )
1374 pDevExt->fLoggingEnabled = VBDrvCommonIsOptionValueTrue(pszValue);
1375 else if ( RTStrNICmpAscii(pszName, RT_STR_TUPLE("log")) == 0
1376 || RTStrNICmpAscii(pszName, RT_STR_TUPLE("dbg_log")) == 0)
1377 {
1378 bool const fLogRel = *pszName == 'd' || *pszName == 'D';
1379 const char *pszSubName = &pszName[fLogRel ? 4 + 3 : 3];
1380 if ( !*pszSubName
1381 || RTStrICmpAscii(pszSubName, "_flags") == 0
1382 || RTStrICmpAscii(pszSubName, "_dest") == 0)
1383 {
1384 PRTLOGGER pLogger = fLogRel ? RTLogRelGetDefaultInstance() : RTLogDefaultInstance();
1385 if (pLogger)
1386 {
1387 if (!*pszSubName)
1388 RTLogGroupSettings(pLogger, pszValue);
1389 else if (RTStrICmpAscii(pszSubName, "_flags"))
1390 RTLogFlags(pLogger, pszValue);
1391 else
1392 RTLogDestinations(pLogger, pszValue);
1393 }
1394 }
1395 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1396 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1397 }
1398 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1399 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1400}
1401
1402
1403/**
1404 * Read driver configuration from the host.
1405 *
1406 * This involves connecting to the guest properties service, which means that
1407 * interrupts needs to work and that the calling thread must be able to block.
1408 *
1409 * @param pDevExt The device extension.
1410 */
1411void VGDrvCommonProcessOptionsFromHost(PVBOXGUESTDEVEXT pDevExt)
1412{
1413 /*
1414 * Create a kernel session without our selves, then connect to the HGCM service.
1415 */
1416 PVBOXGUESTSESSION pSession;
1417 int rc = VGDrvCommonCreateKernelSession(pDevExt, &pSession);
1418 if (RT_SUCCESS(rc))
1419 {
1420 union
1421 {
1422 VBGLIOCHGCMCONNECT Connect;
1423 VBGLIOCHGCMDISCONNECT Disconnect;
1424 GuestPropMsgEnumProperties EnumMsg;
1425 } uBuf;
1426
1427 RT_ZERO(uBuf.Connect);
1428 VBGLREQHDR_INIT(&uBuf.Connect.Hdr, HGCM_CONNECT);
1429 uBuf.Connect.u.In.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1430 RTStrCopy(uBuf.Connect.u.In.Loc.u.host.achName, sizeof(uBuf.Connect.u.In.Loc.u.host.achName),
1431 "VBoxGuestPropSvc"); /** @todo Add a define to the header for the name. */
1432 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CONNECT, pDevExt, pSession, &uBuf.Connect.Hdr, sizeof(uBuf.Connect));
1433 if (RT_SUCCESS(rc))
1434 {
1435 static const char g_szzPattern[] = "/VirtualBox/GuestAdd/VBoxGuest/*\0";
1436 uint32_t const idClient = uBuf.Connect.u.Out.idClient;
1437 char *pszzStrings = NULL;
1438 uint32_t cbStrings;
1439
1440 /*
1441 * Enumerate all the relevant properties. We try with a 1KB buffer, but
1442 * will double it until we get what we want or go beyond 16KB.
1443 */
1444 for (cbStrings = _1K; cbStrings <= _16K; cbStrings *= 2)
1445 {
1446 pszzStrings = (char *)RTMemAllocZ(cbStrings);
1447 if (pszzStrings)
1448 {
1449 VBGL_HGCM_HDR_INIT(&uBuf.EnumMsg.hdr, idClient, GUEST_PROP_FN_ENUM_PROPS, 3);
1450
1451 uBuf.EnumMsg.patterns.type = VMMDevHGCMParmType_LinAddr;
1452 uBuf.EnumMsg.patterns.u.Pointer.size = sizeof(g_szzPattern);
1453 uBuf.EnumMsg.patterns.u.Pointer.u.linearAddr = (uintptr_t)g_szzPattern;
1454
1455 uBuf.EnumMsg.strings.type = VMMDevHGCMParmType_LinAddr;
1456 uBuf.EnumMsg.strings.u.Pointer.size = cbStrings;
1457 uBuf.EnumMsg.strings.u.Pointer.u.linearAddr = (uintptr_t)pszzStrings;
1458
1459 uBuf.EnumMsg.size.type = VMMDevHGCMParmType_32bit;
1460 uBuf.EnumMsg.size.u.value32 = 0;
1461
1462 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CALL(sizeof(uBuf.EnumMsg)), pDevExt, pSession,
1463 &uBuf.EnumMsg.hdr.Hdr, sizeof(uBuf.EnumMsg));
1464 if (RT_SUCCESS(rc))
1465 {
1466 if ( uBuf.EnumMsg.size.type == VMMDevHGCMParmType_32bit
1467 && uBuf.EnumMsg.size.u.value32 <= cbStrings
1468 && uBuf.EnumMsg.size.u.value32 > 0)
1469 cbStrings = uBuf.EnumMsg.size.u.value32;
1470 Log(("VGDrvCommonReadConfigurationFromHost: GUEST_PROP_FN_ENUM_PROPS -> %#x bytes (cbStrings=%#x)\n",
1471 uBuf.EnumMsg.size.u.value32, cbStrings));
1472 break;
1473 }
1474
1475 RTMemFree(pszzStrings);
1476 pszzStrings = NULL;
1477 }
1478 else
1479 {
1480 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to allocate %#x bytes\n", cbStrings));
1481 break;
1482 }
1483 }
1484
1485 /*
1486 * Disconnect and destroy the session.
1487 */
1488 VBGLREQHDR_INIT(&uBuf.Disconnect.Hdr, HGCM_DISCONNECT);
1489 uBuf.Disconnect.u.In.idClient = idClient;
1490 VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_DISCONNECT, pDevExt, pSession, &uBuf.Disconnect.Hdr, sizeof(uBuf.Disconnect));
1491
1492 VGDrvCommonCloseSession(pDevExt, pSession);
1493
1494 /*
1495 * Process the properties if we got any.
1496 *
1497 * The string buffer contains packed strings in groups of four - name, value,
1498 * timestamp (as a decimal string) and flags. It is terminated by four empty
1499 * strings. Layout:
1500 * Name\0Value\0Timestamp\0Flags\0
1501 */
1502 if (pszzStrings)
1503 {
1504 uint32_t off;
1505 for (off = 0; off < cbStrings; off++)
1506 {
1507 /*
1508 * Parse the four fields, checking that it's all plain ASCII w/o any control characters.
1509 */
1510 const char *apszFields[4] = { NULL, NULL, NULL, NULL };
1511 bool fValidFields = true;
1512 unsigned iField;
1513 for (iField = 0; iField < RT_ELEMENTS(apszFields); iField++)
1514 {
1515 apszFields[0] = &pszzStrings[off];
1516 while (off < cbStrings)
1517 {
1518 char ch = pszzStrings[off++];
1519 if ((unsigned)ch < 0x20U || (unsigned)ch > 0x7fU)
1520 {
1521 if (!ch)
1522 break;
1523 if (fValidFields)
1524 Log(("VGDrvCommonReadConfigurationFromHost: Invalid char %#x at %#x (field %u)\n",
1525 ch, off - 1, iField));
1526 fValidFields = false;
1527 }
1528 }
1529 }
1530 if ( off <= cbStrings
1531 && fValidFields
1532 && *apszFields[0] != '\0')
1533 {
1534 /*
1535 * Validate and convert the flags to integer, then process the option.
1536 */
1537 uint32_t fFlags = 0;
1538 rc = GuestPropValidateFlags(apszFields[3], &fFlags);
1539 if (RT_SUCCESS(rc))
1540 {
1541 if (fFlags & GUEST_PROP_F_RDONLYGUEST)
1542 {
1543 apszFields[0] += sizeof(g_szzPattern) - 2;
1544 VGDrvCommonProcessOption(pDevExt, apszFields[0], apszFields[1]);
1545 }
1546 else
1547 LogRel(("VBoxGuest: Ignoring '%s' as it does not have RDONLYGUEST set\n", apszFields[0]));
1548 }
1549 else
1550 LogRel(("VBoxGuest: Invalid flags '%s' for '%s': %Rrc\n", apszFields[2], apszFields[0], rc));
1551 }
1552 else if (off < cbStrings)
1553 {
1554 LogRel(("VBoxGuest: Malformed guest properties enum result!\n"));
1555 Log(("VBoxGuest: off=%#x cbStrings=%#x\n%.*Rhxd\n", off, cbStrings, cbStrings, pszzStrings));
1556 break;
1557 }
1558 else if (!fValidFields)
1559 LogRel(("VBoxGuest: Ignoring %.*Rhxs as it has invalid characters in one or more fields\n",
1560 (int)strlen(apszFields[0]), apszFields[0]));
1561 else
1562 break;
1563 }
1564
1565 RTMemFree(pszzStrings);
1566 }
1567 else
1568 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to enumerate '%s': %Rrc\n", g_szzPattern, rc));
1569
1570 }
1571 else
1572 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1573 }
1574 else
1575 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1576}
1577
1578
1579/**
1580 * Destroys the VBoxGuest device extension.
1581 *
1582 * The native code should call this before the driver is loaded,
1583 * but don't call this on shutdown.
1584 *
1585 * @param pDevExt The device extension.
1586 */
1587void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1588{
1589 Log(("VGDrvCommonDeleteDevExt:\n"));
1590 Log(("VBoxGuest: The additions driver is terminating.\n"));
1591 VGDrvCommonDeleteDevExtResources(pDevExt);
1592 VGDrvCommonDeleteDevExtFundament(pDevExt);
1593 VGDrvCommonDestroyLoggers();
1594}
1595
1596
1597/**
1598 * Creates a VBoxGuest user session.
1599 *
1600 * The native code calls this when a ring-3 client opens the device.
1601 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1602 *
1603 * @returns VBox status code.
1604 * @param pDevExt The device extension.
1605 * @param ppSession Where to store the session on success.
1606 */
1607int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1608{
1609 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1610 if (RT_UNLIKELY(!pSession))
1611 {
1612 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1613 return VERR_NO_MEMORY;
1614 }
1615
1616 pSession->Process = RTProcSelf();
1617 pSession->R0Process = RTR0ProcHandleSelf();
1618 pSession->pDevExt = pDevExt;
1619 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1620 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1621 pDevExt->cSessions++;
1622 RTSpinlockRelease(pDevExt->SessionSpinlock);
1623
1624 *ppSession = pSession;
1625 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1626 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1627 return VINF_SUCCESS;
1628}
1629
1630
1631/**
1632 * Creates a VBoxGuest kernel session.
1633 *
1634 * The native code calls this when a ring-0 client connects to the device.
1635 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1636 *
1637 * @returns VBox status code.
1638 * @param pDevExt The device extension.
1639 * @param ppSession Where to store the session on success.
1640 */
1641int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1642{
1643 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1644 if (RT_UNLIKELY(!pSession))
1645 {
1646 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1647 return VERR_NO_MEMORY;
1648 }
1649
1650 pSession->Process = NIL_RTPROCESS;
1651 pSession->R0Process = NIL_RTR0PROCESS;
1652 pSession->pDevExt = pDevExt;
1653 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1654 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1655 pDevExt->cSessions++;
1656 RTSpinlockRelease(pDevExt->SessionSpinlock);
1657
1658 *ppSession = pSession;
1659 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1660 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1661 return VINF_SUCCESS;
1662}
1663
1664
1665/**
1666 * Closes a VBoxGuest session.
1667 *
1668 * @param pDevExt The device extension.
1669 * @param pSession The session to close (and free).
1670 */
1671void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1672{
1673#ifdef VBOX_WITH_HGCM
1674 unsigned i;
1675#endif
1676 LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1677 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1678
1679 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1680 RTListNodeRemove(&pSession->ListNode);
1681 pDevExt->cSessions--;
1682 RTSpinlockRelease(pDevExt->SessionSpinlock);
1683 vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBGL_IOC_AGC_FLAGS_DEFAULT, true /*fSessionTermination*/);
1684 vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/,
1685 NULL /*pfSessionCaps*/, NULL /*pfGlobalCaps*/, true /*fSessionTermination*/);
1686 vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1687 vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1688
1689 vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1690
1691#ifdef VBOX_WITH_HGCM
1692 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1693 if (pSession->aHGCMClientIds[i])
1694 {
1695 uint32_t idClient = pSession->aHGCMClientIds[i];
1696 pSession->aHGCMClientIds[i] = 0;
1697 Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", idClient));
1698 VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1699 }
1700#endif
1701
1702 pSession->pDevExt = NULL;
1703 pSession->Process = NIL_RTPROCESS;
1704 pSession->R0Process = NIL_RTR0PROCESS;
1705 vgdrvCloseMemBalloon(pDevExt, pSession);
1706 RTMemFree(pSession);
1707}
1708
1709
1710/**
1711 * Allocates a wait-for-event entry.
1712 *
1713 * @returns The wait-for-event entry.
1714 * @param pDevExt The device extension.
1715 * @param pSession The session that's allocating this. Can be NULL.
1716 */
1717static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1718{
1719 /*
1720 * Allocate it one way or the other.
1721 */
1722 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1723 if (pWait)
1724 {
1725 RTSpinlockAcquire(pDevExt->EventSpinlock);
1726
1727 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1728 if (pWait)
1729 RTListNodeRemove(&pWait->ListNode);
1730
1731 RTSpinlockRelease(pDevExt->EventSpinlock);
1732 }
1733 if (!pWait)
1734 {
1735 int rc;
1736
1737 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1738 if (!pWait)
1739 {
1740 LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
1741 return NULL;
1742 }
1743
1744 rc = RTSemEventMultiCreate(&pWait->Event);
1745 if (RT_FAILURE(rc))
1746 {
1747 LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1748 RTMemFree(pWait);
1749 return NULL;
1750 }
1751
1752 pWait->ListNode.pNext = NULL;
1753 pWait->ListNode.pPrev = NULL;
1754 }
1755
1756 /*
1757 * Zero members just as an precaution.
1758 */
1759 pWait->fReqEvents = 0;
1760 pWait->fResEvents = 0;
1761#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1762 pWait->fPendingWakeUp = false;
1763 pWait->fFreeMe = false;
1764#endif
1765 pWait->pSession = pSession;
1766#ifdef VBOX_WITH_HGCM
1767 pWait->pHGCMReq = NULL;
1768#endif
1769 RTSemEventMultiReset(pWait->Event);
1770 return pWait;
1771}
1772
1773
1774/**
1775 * Frees the wait-for-event entry.
1776 *
1777 * The caller must own the wait spinlock !
1778 * The entry must be in a list!
1779 *
1780 * @param pDevExt The device extension.
1781 * @param pWait The wait-for-event entry to free.
1782 */
1783static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1784{
1785 pWait->fReqEvents = 0;
1786 pWait->fResEvents = 0;
1787#ifdef VBOX_WITH_HGCM
1788 pWait->pHGCMReq = NULL;
1789#endif
1790#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1791 Assert(!pWait->fFreeMe);
1792 if (pWait->fPendingWakeUp)
1793 pWait->fFreeMe = true;
1794 else
1795#endif
1796 {
1797 RTListNodeRemove(&pWait->ListNode);
1798 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1799 }
1800}
1801
1802
1803/**
1804 * Frees the wait-for-event entry.
1805 *
1806 * @param pDevExt The device extension.
1807 * @param pWait The wait-for-event entry to free.
1808 */
1809static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1810{
1811 RTSpinlockAcquire(pDevExt->EventSpinlock);
1812 vgdrvWaitFreeLocked(pDevExt, pWait);
1813 RTSpinlockRelease(pDevExt->EventSpinlock);
1814}
1815
1816
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 * At least on Windows this function can be invoked concurrently from
 * different VCPUs. So, be thread-safe.
 *
 * @param   pDevExt     The device extension.
 */
void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness check first; the list is re-examined under the lock below. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int            rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Prevent other threads from accessing pWait when spinlock is released. */
            RTListNodeRemove(&pWait->ListNode);

            /* fPendingWakeUp tells vgdrvWaitFreeLocked to defer freeing to us
               while we signal the event outside the lock. */
            pWait->fPendingWakeUp = true;
            RTSpinlockRelease(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            pWait->fPendingWakeUp = false;
            if (RT_LIKELY(!pWait->fFreeMe))
            { /* likely */ }
            else
            {
                /* Someone tried to free the entry while we were signalling it;
                   complete the free on their behalf. */
                pWait->fFreeMe = false;
                vgdrvWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1864
1865
1866/**
1867 * Implements the fast (no input or output) type of IOCtls.
1868 *
1869 * This is currently just a placeholder stub inherited from the support driver code.
1870 *
1871 * @returns VBox status code.
1872 * @param iFunction The IOCtl function number.
1873 * @param pDevExt The device extension.
1874 * @param pSession The session.
1875 */
1876int VGDrvCommonIoCtlFast(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1877{
1878 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1879
1880 NOREF(iFunction);
1881 NOREF(pDevExt);
1882 NOREF(pSession);
1883 return VERR_NOT_SUPPORTED;
1884}
1885
1886
1887/**
1888 * Gets the driver I/O control interface version, maybe adjusting it for
1889 * backwards compatibility.
1890 *
1891 * The adjusting is currently not implemented as we only have one major I/O
1892 * control interface version out there to support. This is something we will
1893 * implement as needed.
1894 *
1895 * returns IPRT status code.
1896 * @param pDevExt The device extension.
1897 * @param pSession The session.
1898 * @param pReq The request info.
1899 */
1900static int vgdrvIoCtl_DriverVersionInfo(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCDRIVERVERSIONINFO pReq)
1901{
1902 int rc;
1903 LogFlow(("VBGL_IOCTL_DRIVER_VERSION_INFO: uReqVersion=%#x uMinVersion=%#x uReserved1=%#x uReserved2=%#x\n",
1904 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved1, pReq->u.In.uReserved2));
1905 RT_NOREF2(pDevExt, pSession);
1906
1907 /*
1908 * Input validation.
1909 */
1910 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1911 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1912 {
1913 /*
1914 * Match the version.
1915 * The current logic is very simple, match the major interface version.
1916 */
1917 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1918 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1919 rc = VINF_SUCCESS;
1920 else
1921 {
1922 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1923 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1924 rc = VERR_VERSION_MISMATCH;
1925 }
1926 }
1927 else
1928 {
1929 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1930 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1931 rc = VERR_INVALID_PARAMETER;
1932 }
1933
1934 pReq->u.Out.uSessionVersion = RT_SUCCESS(rc) ? VBGL_IOC_VERSION : UINT32_MAX;
1935 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1936 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1937 pReq->u.Out.uReserved1 = 0;
1938 pReq->u.Out.uReserved2 = 0;
1939 return rc;
1940}
1941
1942
/**
 * Similar to vgdrvIoCtl_DriverVersionInfo, except its for IDC.
 *
 * IDC (inter-driver communication) lets other ring-0 drivers connect to
 * VBoxGuest; on success the session pointer is handed back as the connection
 * handle (see vgdrvIoCtl_IdcDisconnect).
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pReq        The request info.
 */
static int vgdrvIoCtl_IdcConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCCONNECT pReq)
{
    int rc;
    LogFlow(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x uReqVersion=%#x uMinVersion=%#x uReserved=%#x\n",
             pReq->u.In.u32MagicCookie, pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved));
    Assert(pSession != NULL);
    RT_NOREF(pDevExt);

    /*
     * Input validation.
     */
    if (pReq->u.In.u32MagicCookie == VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE)
    {
        /* Min version must not exceed the requested version, and both must
           share the same major version (high 16 bits). */
        if (   pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
            && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
        {
            /*
             * Match the version.
             * The current logic is very simple, match the major interface version.
             */
            if (   pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
                && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
            {
                /* Success: return the session pointer as the IDC handle. */
                pReq->u.Out.pvSession       = pSession;
                pReq->u.Out.uSessionVersion = VBGL_IOC_VERSION;
                pReq->u.Out.uDriverVersion  = VBGL_IOC_VERSION;
                pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
                pReq->u.Out.uReserved1      = 0;
                pReq->u.Out.pvReserved2     = NULL;
                return VINF_SUCCESS;

            }
            LogRel(("VBGL_IOCTL_IDC_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
                    pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
            rc = VERR_VERSION_MISMATCH;
        }
        else
        {
            LogRel(("VBGL_IOCTL_IDC_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
                    pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
            rc = VERR_INVALID_PARAMETER;
        }

        /* Failure path (bad version): still report the driver version so the
           caller can log something useful; no session is handed out. */
        pReq->u.Out.pvSession       = NULL;
        pReq->u.Out.uSessionVersion = UINT32_MAX;
        pReq->u.Out.uDriverVersion  = VBGL_IOC_VERSION;
        pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
        pReq->u.Out.uReserved1      = 0;
        pReq->u.Out.pvReserved2     = NULL;
    }
    else
    {
        /* Note: output fields are not touched when the magic cookie is wrong. */
        LogRel(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x expected %#x!\n",
                pReq->u.In.u32MagicCookie, VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE));
        rc = VERR_INVALID_PARAMETER;
    }
    return rc;
}
2009
2010
2011/**
2012 * Counterpart to vgdrvIoCtl_IdcConnect, destroys the session.
2013 *
2014 * returns IPRT status code.
2015 * @param pDevExt The device extension.
2016 * @param pSession The session.
2017 * @param pReq The request info.
2018 */
2019static int vgdrvIoCtl_IdcDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCDISCONNECT pReq)
2020{
2021 LogFlow(("VBGL_IOCTL_IDC_DISCONNECT: pvSession=%p vs pSession=%p\n", pReq->u.In.pvSession, pSession));
2022 RT_NOREF(pDevExt);
2023 Assert(pSession != NULL);
2024
2025 if (pReq->u.In.pvSession == pSession)
2026 {
2027 VGDrvCommonCloseSession(pDevExt, pSession);
2028 return VINF_SUCCESS;
2029 }
2030 LogRel(("VBGL_IOCTL_IDC_DISCONNECT: In.pvSession=%p is not equal to pSession=%p!\n", pReq->u.In.pvSession, pSession));
2031 return VERR_INVALID_PARAMETER;
2032}
2033
2034
/**
 * Return the VMM device I/O info.
 *
 * Copies the I/O port base and the (ring-0) VMMDev memory mapping pointer
 * from the device extension into the output buffer.
 *
 * @returns VINF_SUCCESS (always).
 * @param   pDevExt     The device extension.
 * @param   pInfo       The request info.
 * @note    Ring-0 only, caller checked.
 */
static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDEVIOINFO pInfo)
{
    LogFlow(("VBGL_IOCTL_GET_VMMDEV_IO_INFO\n"));

    pInfo->u.Out.IoPort          = pDevExt->IOPortBase;
    pInfo->u.Out.pvVmmDevMapping = pDevExt->pVMMDevMemory;
    pInfo->u.Out.auPadding[0]    = 0;
    /* NOTE(review): presumably auPadding has more elements on 64-bit hosts,
       hence the extra zeroing below — confirm against VBGLIOCGETVMMDEVIOINFO. */
#if HC_ARCH_BITS != 32
    pInfo->u.Out.auPadding[1]    = 0;
    pInfo->u.Out.auPadding[2]    = 0;
#endif
    return VINF_SUCCESS;
}
2056
2057
/**
 * Set the callback for the kernel mouse handler.
 *
 * Either delegates to the native (OS-specific) implementation, or stores the
 * callback and its user argument in the device extension under the event
 * spinlock (the ISR presumably reads them under the same lock — see
 * VGDrvCommonISR).
 *
 * @returns VINF_SUCCESS (always).
 * @param   pDevExt     The device extension.
 * @param   pNotify     The new callback information.
 */
int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
{
    LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));

#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
    /* The native code handles registration when the callback may preempt. */
    VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
#else
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pDevExt->pfnMouseNotifyCallback   = pNotify->u.In.pfnNotify;
    pDevExt->pvMouseNotifyCallbackArg = pNotify->u.In.pvUser;
    RTSpinlockRelease(pDevExt->EventSpinlock);
#endif
    return VINF_SUCCESS;
}
2079
2080
/**
 * Worker vgdrvIoCtl_WaitEvent.
 *
 * The caller enters the spinlock, we leave it.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately
 *          (matching events were consumed or a pending cancel was serviced),
 *          VERR_TIMEOUT if the condition is not yet met (spinlock also left).
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The request buffer; u.Out.fEvents is written on success.
 * @param   iEvent      Bit index of the first requested event (for logging).
 * @param   fReqEvents  The event mask the caller is waiting for.
 */
DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            PVBGLIOCWAITFOREVENTS pInfo, int iEvent, const uint32_t fReqEvents)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    /* Acquire-style events are only delivered to sessions allowed to see them. */
    if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
        fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
    if (fMatches || pSession->fPendingCancelWaitEvents)
    {
        /* Consume the matched events before dropping the lock. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pInfo->u.Out.fEvents = fMatches;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        pSession->fPendingCancelWaitEvents = false;
        return VINF_SUCCESS;
    }

    RTSpinlockRelease(pDevExt->EventSpinlock);
    return VERR_TIMEOUT;
}
2111
2112
/**
 * Worker for VBGL_IOCTL_WAIT_FOR_EVENTS: waits until one of the requested
 * VMMDev events is pending (or timeout/interruption/cancellation).
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_SEM_DESTROYED, ...).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The request buffer; u.In consumed, u.Out produced.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int vgdrvIoCtl_WaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    PVBGLIOCWAITFOREVENTS pInfo, bool fInterruptible)
{
    uint32_t const  cMsTimeout = pInfo->u.In.cMsTimeOut;
    const uint32_t  fReqEvents = pInfo->u.In.fEvents;
    uint32_t        fResEvents;
    int             iEvent;
    PVBOXGUESTWAIT  pWait;
    int             rc;

    pInfo->u.Out.fEvents = 0; /* Note! This overwrites pInfo->u.In.* fields! */

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     * (vbdgCheckWaitEventCondition releases the spinlock in all cases.)
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* A zero timeout means poll-only: report timeout without waiting. */
    if (!cMsTimeout)
    {
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = vgdrvWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        vgdrvWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    /* UINT32_MAX ms is the convention for "wait forever". */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     * (fResEvents == UINT32_MAX is the cancellation marker set by
     * vgdrvIoCtl_CancelAllWaitEvents.)
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u.Out.fEvents = fResEvents;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        rc = VERR_INTERRUPTED;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken without any events and without a known reason: internal error. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
2222
2223
/** @todo the semantics of this IoCtl have been tightened, so that no calls to
 *        VBOXGUEST_IOCTL_WAITEVENT are allowed in a session after it has been
 *        called.  Change the code to make calls to VBOXGUEST_IOCTL_WAITEVENT made
 *        after that to return VERR_INTERRUPTED or something appropriate. */
/**
 * Cancels all waits of the given session, marking them with the UINT32_MAX
 * cancellation value and waking the sleepers up.
 *
 * @returns VINF_SUCCESS (always).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits shall be cancelled.
 */
static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT          pWait;
    PVBOXGUESTWAIT          pSafe;
    int                     rc = 0;
    /* Was as least one WAITEVENT in process for this session?  If not we
     * set a flag that the next call should be interrupted immediately.  This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool                    fCancelledOne = false;

    LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            pWait->fResEvents = UINT32_MAX;     /* cancellation marker, see vgdrvIoCtl_WaitForEvents */
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Signalling happens later in VGDrvCommonWaitDoWakeUps, outside the spinlock. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
2272
2273
2274/**
2275 * Checks if the VMM request is allowed in the context of the given session.
2276 *
2277 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
2278 * @param pDevExt The device extension.
2279 * @param pSession The calling session.
2280 * @param enmType The request type.
2281 * @param pReqHdr The request.
2282 */
2283static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
2284 VMMDevRequestHeader const *pReqHdr)
2285{
2286 /*
2287 * Categorize the request being made.
2288 */
2289 /** @todo This need quite some more work! */
2290 enum
2291 {
2292 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
2293 } enmRequired;
2294 RT_NOREF1(pDevExt);
2295
2296 switch (enmType)
2297 {
2298 /*
2299 * Deny access to anything we don't know or provide specialized I/O controls for.
2300 */
2301#ifdef VBOX_WITH_HGCM
2302 case VMMDevReq_HGCMConnect:
2303 case VMMDevReq_HGCMDisconnect:
2304# ifdef VBOX_WITH_64_BITS_GUESTS
2305 case VMMDevReq_HGCMCall32:
2306 case VMMDevReq_HGCMCall64:
2307# else
2308 case VMMDevReq_HGCMCall:
2309# endif /* VBOX_WITH_64_BITS_GUESTS */
2310 case VMMDevReq_HGCMCancel:
2311 case VMMDevReq_HGCMCancel2:
2312#endif /* VBOX_WITH_HGCM */
2313 case VMMDevReq_SetGuestCapabilities:
2314 default:
2315 enmRequired = kLevel_NoOne;
2316 break;
2317
2318 /*
2319 * There are a few things only this driver can do (and it doesn't use
2320 * the VMMRequst I/O control route anyway, but whatever).
2321 */
2322 case VMMDevReq_ReportGuestInfo:
2323 case VMMDevReq_ReportGuestInfo2:
2324 case VMMDevReq_GetHypervisorInfo:
2325 case VMMDevReq_SetHypervisorInfo:
2326 case VMMDevReq_RegisterPatchMemory:
2327 case VMMDevReq_DeregisterPatchMemory:
2328 case VMMDevReq_GetMemBalloonChangeRequest:
2329 enmRequired = kLevel_OnlyVBoxGuest;
2330 break;
2331
2332 /*
2333 * Trusted users apps only.
2334 */
2335 case VMMDevReq_QueryCredentials:
2336 case VMMDevReq_ReportCredentialsJudgement:
2337 case VMMDevReq_RegisterSharedModule:
2338 case VMMDevReq_UnregisterSharedModule:
2339 case VMMDevReq_WriteCoreDump:
2340 case VMMDevReq_GetCpuHotPlugRequest:
2341 case VMMDevReq_SetCpuHotPlugStatus:
2342 case VMMDevReq_CheckSharedModules:
2343 case VMMDevReq_GetPageSharingStatus:
2344 case VMMDevReq_DebugIsPageShared:
2345 case VMMDevReq_ReportGuestStats:
2346 case VMMDevReq_ReportGuestUserState:
2347 case VMMDevReq_GetStatisticsChangeRequest:
2348 case VMMDevReq_ChangeMemBalloon:
2349 enmRequired = kLevel_TrustedUsers;
2350 break;
2351
2352 /*
2353 * Anyone.
2354 */
2355 case VMMDevReq_GetMouseStatus:
2356 case VMMDevReq_SetMouseStatus:
2357 case VMMDevReq_SetPointerShape:
2358 case VMMDevReq_GetHostVersion:
2359 case VMMDevReq_Idle:
2360 case VMMDevReq_GetHostTime:
2361 case VMMDevReq_SetPowerStatus:
2362 case VMMDevReq_AcknowledgeEvents:
2363 case VMMDevReq_CtlGuestFilterMask:
2364 case VMMDevReq_ReportGuestStatus:
2365 case VMMDevReq_GetDisplayChangeRequest:
2366 case VMMDevReq_VideoModeSupported:
2367 case VMMDevReq_GetHeightReduction:
2368 case VMMDevReq_GetDisplayChangeRequest2:
2369 case VMMDevReq_VideoModeSupported2:
2370 case VMMDevReq_VideoAccelEnable:
2371 case VMMDevReq_VideoAccelFlush:
2372 case VMMDevReq_VideoSetVisibleRegion:
2373 case VMMDevReq_GetDisplayChangeRequestEx:
2374 case VMMDevReq_GetSeamlessChangeRequest:
2375 case VMMDevReq_GetVRDPChangeRequest:
2376 case VMMDevReq_LogString:
2377 case VMMDevReq_GetSessionId:
2378 enmRequired = kLevel_AllUsers;
2379 break;
2380
2381 /*
2382 * Depends on the request parameters...
2383 */
2384 /** @todo this have to be changed into an I/O control and the facilities
2385 * tracked in the session so they can automatically be failed when the
2386 * session terminates without reporting the new status.
2387 *
2388 * The information presented by IGuest is not reliable without this! */
2389 case VMMDevReq_ReportGuestCapabilities:
2390 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
2391 {
2392 case VBoxGuestFacilityType_All:
2393 case VBoxGuestFacilityType_VBoxGuestDriver:
2394 enmRequired = kLevel_OnlyVBoxGuest;
2395 break;
2396 case VBoxGuestFacilityType_VBoxService:
2397 enmRequired = kLevel_TrustedUsers;
2398 break;
2399 case VBoxGuestFacilityType_VBoxTrayClient:
2400 case VBoxGuestFacilityType_Seamless:
2401 case VBoxGuestFacilityType_Graphics:
2402 default:
2403 enmRequired = kLevel_AllUsers;
2404 break;
2405 }
2406 break;
2407 }
2408
2409 /*
2410 * Check against the session.
2411 */
2412 switch (enmRequired)
2413 {
2414 default:
2415 case kLevel_NoOne:
2416 break;
2417 case kLevel_OnlyVBoxGuest:
2418 case kLevel_OnlyKernel:
2419 if (pSession->R0Process == NIL_RTR0PROCESS)
2420 return VINF_SUCCESS;
2421 break;
2422 case kLevel_TrustedUsers:
2423 case kLevel_AllUsers:
2424 return VINF_SUCCESS;
2425 }
2426
2427 return VERR_PERMISSION_DENIED;
2428}
2429
/**
 * Worker for VBGL_IOCTL_VMMDEV_REQUEST: validates, permission-checks and
 * forwards a generic VMMDev request, copying the result back on success.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReqHdr     The VMMDev request (input and output buffer).
 * @param   cbData      Size of the buffer pReqHdr points to.
 */
static int vgdrvIoCtl_VMMDevRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    VMMDevRequestHeader *pReqHdr, size_t cbData)
{
    int                     rc;
    VMMDevRequestHeader    *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGR0Verify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Session-based permission check, see vgdrvCheckIfVmmReqIsAllowed. */
    rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglR0GRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);
    /* NOTE(review): reserved1 appears to double as the cbOut field here —
       asserted to hold cbReq on input and restored below; confirm against the
       I/O control dispatcher. */
    Assert(pReqCopy->reserved1 == cbReq);
    pReqCopy->reserved1 = 0;            /* VGDrvCommonIoCtl or caller sets cbOut, so clear it. */

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglR0GRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly modified) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        pReqHdr->reserved1 = cbReq; /* preserve cbOut */
    }
    else if (RT_FAILURE(rc))
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglR0GRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Host executed the request but the request itself failed. */
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglR0GRFree(pReqCopy);
    return rc;
}
2514
2515
2516#ifdef VBOX_WITH_HGCM
2517
2518AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2519
/**
 * Worker for vgdrvHgcmAsyncWaitCallback*.
 *
 * Blocks until the HGCM request is marked done (VBOX_HGCM_REQ_DONE) by the
 * ISR, or until timeout/interruption.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_SEM_DESTROYED, ...).
 * @param   pHdr            The HGCM request header (fu32Flags polled).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Wait timeout in milliseconds.
 */
static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                            bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        /* No session owner: HGCM waits are driver internal. */
        pWait = vgdrvWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);   /* back off briefly and retry the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        vgdrvWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Semaphore destroyed (driver unloading) - bail without touching pWait. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));

    vgdrvWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2588
2589
2590/**
2591 * This is a callback for dealing with async waits.
2592 *
2593 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2594 */
2595static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2596{
2597 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2598 LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2599 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2600 false /* fInterruptible */, u32User /* cMillies */);
2601}
2602
2603
2604/**
2605 * This is a callback for dealing with async waits with a timeout.
2606 *
2607 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2608 */
2609static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2610{
2611 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2612 LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2613 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2614 true /* fInterruptible */, u32User /* cMillies */);
2615}
2616
2617
/**
 * Worker for VBGL_IOCTL_HGCM_CONNECT: connects to an HGCM service and
 * registers the resulting client id with the session.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session (owns the client id table).
 * @param   pInfo       The request buffer; u.In.Loc consumed, u.Out.idClient produced.
 */
static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCONNECT pInfo)
{
    int rc;
    HGCMCLIENTID idClient = 0;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
         pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->u.In.Loc.type ==  VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->u.In.Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(&pInfo->u.In.Loc, &idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: idClient=%RX32 (rc=%Rrc)\n", idClient, rc));
    if (RT_SUCCESS(rc))
    {
        /*
         * Append the client id to the client id table.
         * If the table has somehow become filled up, we'll disconnect the session.
         */
        unsigned i;
        RTSpinlockAcquire(pDevExt->SessionSpinlock);
        for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
            if (!pSession->aHGCMClientIds[i])
            {
                pSession->aHGCMClientIds[i] = idClient;
                break;
            }
        RTSpinlockRelease(pDevExt->SessionSpinlock);
        if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
        {
            /* Table full: roll back the connect so we don't leak the client. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
            VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);

            pInfo->u.Out.idClient = 0;
            return VERR_TOO_MANY_OPEN_FILES;
        }
    }
    pInfo->u.Out.idClient = idClient;
    return rc;
}
2661
2662
/**
 * Worker for VBGL_IOCTL_HGCM_DISCONNECT: disconnects an HGCM client and
 * removes its id from the session's client id table.
 *
 * @returns VBox status code (VERR_INVALID_HANDLE if the id isn't registered
 *          with this session).
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       The request buffer; u.In.idClient consumed.
 */
static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMDISCONNECT pInfo)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     * (UINT32_MAX marks the slot as "disconnect in progress".)
     */
    int             rc;
    const uint32_t  idClient = pInfo->u.In.idClient;
    unsigned        i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == idClient)
        {
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
    rc = VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: rc=%Rrc\n", rc));

    /* Update the client id array according to the result: free the slot on
       success, restore the id on failure so the caller may retry. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) ? 0 : idClient;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    return rc;
}
2702
2703
/**
 * Worker for VBGL_IOCTL_HGCM_CALL: validates sizes and the client id, then
 * performs the HGCM call via the Vbgl HGCM internal API.
 *
 * @returns VBox status code; on success the HGCM result from pInfo->Hdr.rc.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The call request (header + parameters).
 * @param   cMillies        Call timeout in milliseconds.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   f32bit          Set if the parameters use the 32-bit layout
 *                          (only relevant on AMD64).
 * @param   fUserData       Set if the buffers must be treated as user-mode
 *                          data even for a kernel session.
 * @param   cbExtra         Size of any extra data preceding pInfo in the buffer.
 * @param   cbData          Total size of the buffer (incl. cbExtra).
 */
static int vgdrvIoCtl_HGCMCallInner(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
                                    uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
                                    size_t cbExtra, size_t cbData)
{
    const uint32_t  u32ClientId = pInfo->u32ClientID;
    uint32_t        fFlags;
    size_t          cbActual;
    unsigned        i;
    int             rc;

    /*
     * Some more validations.
     */
    if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
        return VERR_INVALID_PARAMETER;
    }

    /* Compute the required buffer size from the parameter count and layout. */
    cbActual = cbExtra + sizeof(*pInfo);
#ifdef RT_ARCH_AMD64
    if (f32bit)
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
    else
#endif
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
    if (cbData < cbActual)
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
                cbData, cbData, cbActual, cbActual));
        return VERR_INVALID_PARAMETER;
    }
    pInfo->Hdr.cbOut = (uint32_t)cbActual;

    /*
     * Validate the client id.
     */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
            break;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMCall call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. This function can
     * deal with cancelled requests, so we let user more requests
     * be interruptible (should add a flag for this later I guess).
     */
    LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
    fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
    uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
#ifdef RT_ARCH_AMD64
    if (f32bit)
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    else
#endif
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    if (RT_SUCCESS(rc))
    {
        /* The transport succeeded; the actual call result is in the header. */
        rc = pInfo->Hdr.rc;
        LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", rc));
    }
    else
    {
        if (   rc != VERR_INTERRUPTED
            && rc != VERR_TIMEOUT)
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
        else
            Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
    }
    return rc;
}
2792
2793
2794static int vgdrvIoCtl_HGCMCallWrapper(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
2795 bool f32bit, bool fUserData, size_t cbData)
2796{
2797 return vgdrvIoCtl_HGCMCallInner(pDevExt, pSession, pInfo, pInfo->cMsTimeout,
2798 pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2799 f32bit, fUserData, 0 /*cbExtra*/, cbData);
2800}
2801
2802
2803#endif /* VBOX_WITH_HGCM */
2804
/**
 * Handle VBGL_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly.  If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.  Out-of-memory conditions from the kernel-side
 *          ballooning are converted to VINF_SUCCESS (best effort).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pInfo       The output buffer.
 */
static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHECKBALLOON pInfo)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    LogFlow(("VBGL_IOCTL_CHECK_BALLOON:\n"));
    /* All balloon state (owner, chunk bookkeeping) is serialized by this mutex. */
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        /* Allocate a VMMDev request buffer for querying the desired balloon size. */
        rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglR0GRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* cMaxChunks is fixed for the VM lifetime; it may only transition from 0 (unset). */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->u.Out.cBalloonChunks = pReq->cBalloonChunks;
                pInfo->u.Out.fHandleInR3    = false;
                pInfo->u.Out.afPadding[0]   = false;
                pInfo->u.Out.afPadding[1]   = false;
                pInfo->u.Out.afPadding[2]   = false;

                /* Try adjust the balloon from the kernel; may set fHandleInR3 to
                   ask user land to supply lockable memory instead. */
                rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->u.Out.fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;
            }
            else
                LogRel(("VBGL_IOCTL_CHECK_BALLOON: VbglR0GRPerform failed. rc=%Rrc\n", rc));
            VbglR0GRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED; /* some other session already owns the balloon */

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    LogFlow(("VBGL_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
    return rc;
}
2877
2878
2879/**
2880 * Handle a request for changing the memory balloon.
2881 *
2882 * @returns VBox status code.
2883 *
2884 * @param pDevExt The device extention.
2885 * @param pSession The session.
2886 * @param pInfo The change request structure (input).
2887 */
2888static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEBALLOON pInfo)
2889{
2890 int rc;
2891 LogFlow(("VBGL_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%p\n", pInfo->u.In.fInflate, pInfo->u.In.pvChunk));
2892 if ( pInfo->u.In.abPadding[0]
2893 || pInfo->u.In.abPadding[1]
2894 || pInfo->u.In.abPadding[2]
2895 || pInfo->u.In.abPadding[3]
2896 || pInfo->u.In.abPadding[4]
2897 || pInfo->u.In.abPadding[5]
2898 || pInfo->u.In.abPadding[6]
2899#if ARCH_BITS == 32
2900 || pInfo->u.In.abPadding[7]
2901 || pInfo->u.In.abPadding[8]
2902 || pInfo->u.In.abPadding[9]
2903#endif
2904 )
2905 {
2906 Log(("VBGL_IOCTL_CHANGE_BALLOON: Padding isn't all zero: %.*Rhxs\n", sizeof(pInfo->u.In.abPadding), pInfo->u.In.abPadding));
2907 return VERR_INVALID_PARAMETER;
2908 }
2909
2910 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2911 AssertRCReturn(rc, rc);
2912
2913 if (!pDevExt->MemBalloon.fUseKernelAPI)
2914 {
2915 /*
2916 * The first user trying to query/change the balloon becomes the
2917 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
2918 */
2919 if ( pDevExt->MemBalloon.pOwner != pSession
2920 && pDevExt->MemBalloon.pOwner == NULL)
2921 pDevExt->MemBalloon.pOwner = pSession;
2922
2923 if (pDevExt->MemBalloon.pOwner == pSession)
2924 rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u.In.pvChunk, pInfo->u.In.fInflate != false);
2925 else
2926 rc = VERR_PERMISSION_DENIED;
2927 }
2928 else
2929 rc = VERR_PERMISSION_DENIED;
2930
2931 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2932 return rc;
2933}
2934
2935
2936/**
2937 * Handle a request for writing a core dump of the guest on the host.
2938 *
2939 * @returns VBox status code.
2940 *
2941 * @param pDevExt The device extension.
2942 * @param pInfo The output buffer.
2943 */
2944static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCWRITECOREDUMP pInfo)
2945{
2946 VMMDevReqWriteCoreDump *pReq = NULL;
2947 int rc;
2948 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
2949 RT_NOREF1(pDevExt);
2950
2951 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2952 if (RT_SUCCESS(rc))
2953 {
2954 pReq->fFlags = pInfo->u.In.fFlags;
2955 rc = VbglR0GRPerform(&pReq->header);
2956 if (RT_FAILURE(rc))
2957 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglR0GRPerform failed, rc=%Rrc!\n", rc));
2958
2959 VbglR0GRFree(&pReq->header);
2960 }
2961 else
2962 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2963 sizeof(*pReq), sizeof(*pReq), rc));
2964 return rc;
2965}
2966
2967
2968/**
2969 * Guest backdoor logging.
2970 *
2971 * @returns VBox status code.
2972 *
2973 * @param pDevExt The device extension.
2974 * @param pch The log message (need not be NULL terminated).
2975 * @param cbData Size of the buffer.
2976 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
2977 * call. True normal user, false root user.
2978 */
2979static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, bool fUserSession)
2980{
2981 if (pDevExt->fLoggingEnabled)
2982 RTLogBackdoorPrintf("%.*s", cbData, pch);
2983 else if (!fUserSession)
2984 LogRel(("%.*s", cbData, pch));
2985 else
2986 Log(("%.*s", cbData, pch));
2987 return VINF_SUCCESS;
2988}
2989
2990
2991/** @name Guest Capabilities, Mouse Status and Event Filter
2992 * @{
2993 */
2994
2995/**
2996 * Clears a bit usage tracker (init time).
2997 *
2998 * @param pTracker The tracker to clear.
2999 */
3000static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
3001{
3002 uint32_t iBit;
3003 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3004
3005 for (iBit = 0; iBit < 32; iBit++)
3006 pTracker->acPerBitUsage[iBit] = 0;
3007 pTracker->fMask = 0;
3008}
3009
3010
3011#ifdef VBOX_STRICT
3012/**
3013 * Checks that pTracker->fMask is correct and that the usage values are within
3014 * the valid range.
3015 *
3016 * @param pTracker The tracker.
3017 * @param cMax Max valid usage value.
3018 * @param pszWhat Identifies the tracker in assertions.
3019 */
3020static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
3021{
3022 uint32_t fMask = 0;
3023 uint32_t iBit;
3024 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3025
3026 for (iBit = 0; iBit < 32; iBit++)
3027 if (pTracker->acPerBitUsage[iBit])
3028 {
3029 fMask |= RT_BIT_32(iBit);
3030 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
3031 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3032 }
3033
3034 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
3035}
3036#endif
3037
3038
3039/**
3040 * Applies a change to the bit usage tracker.
3041 *
3042 *
3043 * @returns true if the mask changed, false if not.
3044 * @param pTracker The bit usage tracker.
3045 * @param fChanged The bits to change.
3046 * @param fPrevious The previous value of the bits.
3047 * @param cMax The max valid usage value for assertions.
3048 * @param pszWhat Identifies the tracker in assertions.
3049 */
3050static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
3051 uint32_t cMax, const char *pszWhat)
3052{
3053 bool fGlobalChange = false;
3054 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3055
3056 while (fChanged)
3057 {
3058 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
3059 uint32_t const fBitMask = RT_BIT_32(iBit);
3060 Assert(iBit < 32); Assert(fBitMask & fChanged);
3061
3062 if (fBitMask & fPrevious)
3063 {
3064 pTracker->acPerBitUsage[iBit] -= 1;
3065 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
3066 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3067 if (pTracker->acPerBitUsage[iBit] == 0)
3068 {
3069 fGlobalChange = true;
3070 pTracker->fMask &= ~fBitMask;
3071 }
3072 }
3073 else
3074 {
3075 pTracker->acPerBitUsage[iBit] += 1;
3076 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
3077 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3078 if (pTracker->acPerBitUsage[iBit] == 1)
3079 {
3080 fGlobalChange = true;
3081 pTracker->fMask |= fBitMask;
3082 }
3083 }
3084
3085 fChanged &= ~fBitMask;
3086 }
3087
3088#ifdef VBOX_STRICT
3089 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
3090#endif
3091 NOREF(pszWhat); NOREF(cMax);
3092 return fGlobalChange;
3093}
3094
3095
3096/**
3097 * Init and termination worker for resetting the (host) event filter on the host
3098 *
3099 * @returns VBox status code.
3100 * @param pDevExt The device extension.
3101 * @param fFixedEvents Fixed events (init time).
3102 */
3103static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
3104{
3105 VMMDevCtlGuestFilterMask *pReq;
3106 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
3107 if (RT_SUCCESS(rc))
3108 {
3109 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
3110 pReq->u32OrMask = fFixedEvents;
3111 rc = VbglR0GRPerform(&pReq->header);
3112 if (RT_FAILURE(rc))
3113 LogRelFunc(("failed with rc=%Rrc\n", rc));
3114 VbglR0GRFree(&pReq->header);
3115 }
3116 RT_NOREF1(pDevExt);
3117 return rc;
3118}
3119
3120
/**
 * Changes the event filter mask for the given session.
 *
 * This is called in response to VBGL_IOCTL_CHANGE_FILTER_MASK as well as to do
 * session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The events to add.
 * @param   fNotMask            The events to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevCtlGuestFilterMask    *pReq;
    uint32_t                     fChanged;
    uint32_t                     fPrevious;
    int                          rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionFilterMask: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fEventFilter;
    pSession->fEventFilter |= fOrMask;
    pSession->fEventFilter &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fEventFilter;
    LogFlow(("vgdrvSetSessionEventFilter: Session->fEventFilter: %#x -> %#x (changed %#x)\n",
             fPrevious, pSession->fEventFilter, fChanged));
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "EventFilterTracker");

        /*
         * If there are global changes, update the event filter on the host.
         */
        if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
        {
            /* pReq may only be NULL at session termination time (see above). */
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
                if (pReq->u32OrMask == pDevExt->fEventFilterHost)
                    rc = VINF_SUCCESS; /* host already has this exact filter - skip the round trip */
                else
                {
                    pDevExt->fEventFilterHost = pReq->u32OrMask;
                    pReq->u32NotMask = ~pReq->u32OrMask;
                    rc = VbglR0GRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fEventFilterHost = UINT32_MAX; /* force a resync on the next change */
                        if (!fSessionTermination)
                        {
                            /* Undo the tracker update (note the swapped previous/current args)
                               and restore the session filter. */
                            vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
                                                       pDevExt->cSessions, "EventFilterTracker");
                            pSession->fEventFilter = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS;
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3223
3224
3225/**
3226 * Handle VBGL_IOCTL_CHANGE_FILTER_MASK.
3227 *
3228 * @returns VBox status code.
3229 *
3230 * @param pDevExt The device extension.
3231 * @param pSession The session.
3232 * @param pInfo The request.
3233 */
3234static int vgdrvIoCtl_ChangeFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEFILTERMASK pInfo)
3235{
3236 LogFlow(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3237
3238 if ((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
3239 {
3240 Log(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3241 return VERR_INVALID_PARAMETER;
3242 }
3243
3244 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask, false /*fSessionTermination*/);
3245}
3246
3247
3248/**
3249 * Init and termination worker for set mouse feature status to zero on the host.
3250 *
3251 * @returns VBox status code.
3252 * @param pDevExt The device extension.
3253 */
3254static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
3255{
3256 VMMDevReqMouseStatus *pReq;
3257 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
3258 if (RT_SUCCESS(rc))
3259 {
3260 pReq->mouseFeatures = 0;
3261 pReq->pointerXPos = 0;
3262 pReq->pointerYPos = 0;
3263 rc = VbglR0GRPerform(&pReq->header);
3264 if (RT_FAILURE(rc))
3265 LogRelFunc(("failed with rc=%Rrc\n", rc));
3266 VbglR0GRFree(&pReq->header);
3267 }
3268 RT_NOREF1(pDevExt);
3269 return rc;
3270}
3271
3272
/**
 * Changes the mouse status mask for the given session.
 *
 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
 * do session cleanup.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   fOrMask             The status flags to add.
 * @param   fNotMask            The status flags to remove.
 * @param   fSessionTermination Set if we're called by the session cleanup code.
 *                              This tweaks the error handling so we perform
 *                              proper session cleanup even if the host
 *                              misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevReqMouseStatus   *pReq;
    uint32_t                fChanged;
    uint32_t                fPrevious;
    int                     rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionMouseStatus: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fMouseStatus;
    pSession->fMouseStatus |= fOrMask;
    pSession->fMouseStatus &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fMouseStatus;
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "MouseStatusTracker");

        /*
         * If there are global changes, update the event filter on the host.
         */
        if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
        {
            /* pReq may only be NULL at session termination time (see above). */
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
                if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
                    rc = VINF_SUCCESS; /* host already has this exact status - skip the round trip */
                else
                {
                    pDevExt->fMouseStatusHost = pReq->mouseFeatures;
                    pReq->pointerXPos = 0;
                    pReq->pointerYPos = 0;
                    rc = VbglR0GRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fMouseStatusHost = UINT32_MAX; /* force a resync on the next change */
                        if (!fSessionTermination)
                        {
                            /* Undo the tracker update (note the swapped previous/current args)
                               and restore the session status. */
                            vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
                                                       pDevExt->cSessions, "MouseStatusTracker");
                            pSession->fMouseStatus = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS;
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3374
3375
3376/**
3377 * Sets the mouse status features for this session and updates them globally.
3378 *
3379 * @returns VBox status code.
3380 *
3381 * @param pDevExt The device extention.
3382 * @param pSession The session.
3383 * @param fFeatures New bitmap of enabled features.
3384 */
3385static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
3386{
3387 LogFlow(("VBGL_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
3388
3389 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
3390 return VERR_INVALID_PARAMETER;
3391
3392 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
3393}
3394
3395
3396/**
3397 * Return the mask of VMM device events that this session is allowed to see (wrt
3398 * to "acquire" mode guest capabilities).
3399 *
3400 * The events associated with guest capabilities in "acquire" mode will be
3401 * restricted to sessions which has acquired the respective capabilities.
3402 * If someone else tries to wait for acquired events, they won't be woken up
3403 * when the event becomes pending. Should some other thread in the session
3404 * acquire the capability while the corresponding event is pending, the waiting
3405 * thread will woken up.
3406 *
3407 * @returns Mask of events valid for the given session.
3408 * @param pDevExt The device extension.
3409 * @param pSession The session.
3410 *
3411 * @remarks Needs only be called when dispatching events in the
3412 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
3413 */
3414static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
3415{
3416 uint32_t fAcquireModeGuestCaps;
3417 uint32_t fAcquiredGuestCaps;
3418 uint32_t fAllowedEvents;
3419
3420 /*
3421 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
3422 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
3423 */
3424 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
3425 if (fAcquireModeGuestCaps == 0)
3426 return VMMDEV_EVENT_VALID_EVENT_MASK;
3427 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
3428
3429 /*
3430 * Calculate which events to allow according to the cap config and caps
3431 * acquired by the session.
3432 */
3433 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
3434 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
3435 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
3436 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
3437
3438 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3439 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
3440 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3441
3442 return fAllowedEvents;
3443}
3444
3445
3446/**
3447 * Init and termination worker for set guest capabilities to zero on the host.
3448 *
3449 * @returns VBox status code.
3450 * @param pDevExt The device extension.
3451 */
3452static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
3453{
3454 VMMDevReqGuestCapabilities2 *pReq;
3455 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3456 if (RT_SUCCESS(rc))
3457 {
3458 pReq->u32NotMask = UINT32_MAX;
3459 pReq->u32OrMask = 0;
3460 rc = VbglR0GRPerform(&pReq->header);
3461
3462 if (RT_FAILURE(rc))
3463 LogRelFunc(("failed with rc=%Rrc\n", rc));
3464 VbglR0GRFree(&pReq->header);
3465 }
3466 RT_NOREF1(pDevExt);
3467 return rc;
3468}
3469
3470
3471/**
3472 * Sets the guest capabilities to the host while holding the lock.
3473 *
3474 * This will ASSUME that we're the ones in charge of the mask, so
3475 * we'll simply clear all bits we don't set.
3476 *
3477 * @returns VBox status code.
3478 * @param pDevExt The device extension.
3479 * @param pReq The request.
3480 */
3481static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
3482{
3483 int rc;
3484
3485 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3486 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
3487 rc = VINF_SUCCESS;
3488 else
3489 {
3490 pDevExt->fGuestCapsHost = pReq->u32OrMask;
3491 pReq->u32NotMask = ~pReq->u32OrMask;
3492 rc = VbglR0GRPerform(&pReq->header);
3493 if (RT_FAILURE(rc))
3494 pDevExt->fGuestCapsHost = UINT32_MAX;
3495 }
3496
3497 return rc;
3498}
3499
3500
3501/**
3502 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
3503 * the given session.
3504 *
3505 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
3506 * to do session cleanup.
3507 *
3508 * @returns VBox status code.
3509 * @param pDevExt The device extension.
3510 * @param pSession The session.
3511 * @param fOrMask The capabilities to add .
3512 * @param fNotMask The capabilities to remove. Ignored in
3513 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
3514 * @param fFlags Confusing operation modifier.
3515 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
3516 * configure and acquire/release the capabilities.
3517 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3518 * means only configure capabilities in the
3519 * @a fOrMask capabilities for "acquire" mode.
3520 * @param fSessionTermination Set if we're called by the session cleanup code.
3521 * This tweaks the error handling so we perform
3522 * proper session cleanup even if the host
3523 * misbehaves.
3524 *
3525 * @remarks Takes both the session and event spinlocks.
3526 */
3527static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3528 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags,
3529 bool fSessionTermination)
3530{
3531 uint32_t fCurrentOwnedCaps;
3532 uint32_t fSessionRemovedCaps;
3533 uint32_t fSessionAddedCaps;
3534 uint32_t fOtherConflictingCaps;
3535 VMMDevReqGuestCapabilities2 *pReq = NULL;
3536 int rc;
3537
3538
3539 /*
3540 * Validate and adjust input.
3541 */
3542 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3543 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3544 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3545 {
3546 LogRel(("vgdrvAcquireSessionCapabilities: invalid fOrMask=%#x (pSession=%p fNotMask=%#x fFlags=%#x)\n",
3547 fOrMask, pSession, fNotMask, fFlags));
3548 return VERR_INVALID_PARAMETER;
3549 }
3550
3551 if ((fFlags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) != 0)
3552 {
3553 LogRel(("vgdrvAcquireSessionCapabilities: invalid fFlags=%#x (pSession=%p fOrMask=%#x fNotMask=%#x)\n",
3554 fFlags, pSession, fOrMask, fNotMask));
3555 return VERR_INVALID_PARAMETER;
3556 }
3557 Assert(!fOrMask || !fSessionTermination);
3558
3559 /* The fNotMask no need to have all values valid, invalid ones will simply be ignored. */
3560 fNotMask &= ~fOrMask;
3561
3562 /*
3563 * Preallocate a update request if we're about to do more than just configure
3564 * the capability mode.
3565 */
3566 if (!(fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE))
3567 {
3568 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3569 if (RT_SUCCESS(rc))
3570 { /* do nothing */ }
3571 else if (!fSessionTermination)
3572 {
3573 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: VbglR0GRAlloc failure: %Rrc\n",
3574 pSession, fOrMask, fNotMask, fFlags, rc));
3575 return rc;
3576 }
3577 else
3578 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3579 }
3580
3581 /*
3582 * Try switch the capabilities in the OR mask into "acquire" mode.
3583 *
3584 * Note! We currently ignore anyone which may already have "set" the capabilities
3585 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3586 */
3587 RTSpinlockAcquire(pDevExt->EventSpinlock);
3588
3589 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3590 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3591 else
3592 {
3593 RTSpinlockRelease(pDevExt->EventSpinlock);
3594
3595 if (pReq)
3596 VbglR0GRFree(&pReq->header);
3597 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3598 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: calling caps acquire for set caps\n",
3599 pSession, fOrMask, fNotMask, fFlags));
3600 return VERR_INVALID_STATE;
3601 }
3602
3603 /*
3604 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3605 */
3606 if (fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
3607 {
3608 RTSpinlockRelease(pDevExt->EventSpinlock);
3609
3610 Assert(!pReq);
3611 Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: configured acquire caps: 0x%x\n",
3612 pSession, fOrMask, fNotMask, fFlags));
3613 return VINF_SUCCESS;
3614 }
3615 Assert(pReq || fSessionTermination);
3616
3617 /*
3618 * Caller wants to acquire/release the capabilities too.
3619 *
3620 * Note! The mode change of the capabilities above won't be reverted on
3621 * failure, this is intentional.
3622 */
3623 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3624 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3625 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3626 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3627 fOtherConflictingCaps &= fSessionAddedCaps;
3628
3629 if (!fOtherConflictingCaps)
3630 {
3631 if (fSessionAddedCaps)
3632 {
3633 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3634 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3635 }
3636
3637 if (fSessionRemovedCaps)
3638 {
3639 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3640 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3641 }
3642
3643 /*
3644 * If something changes (which is very likely), tell the host.
3645 */
3646 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3647 {
3648 Assert(pReq || fSessionTermination);
3649 if (pReq)
3650 {
3651 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3652 if (RT_FAILURE(rc) && !fSessionTermination)
3653 {
3654 /* Failed, roll back. */
3655 if (fSessionAddedCaps)
3656 {
3657 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3658 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3659 }
3660 if (fSessionRemovedCaps)
3661 {
3662 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3663 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3664 }
3665
3666 RTSpinlockRelease(pDevExt->EventSpinlock);
3667 LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3668 VbglR0GRFree(&pReq->header);
3669 return rc;
3670 }
3671 }
3672 }
3673 }
3674 else
3675 {
3676 RTSpinlockRelease(pDevExt->EventSpinlock);
3677
3678 Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3679 VbglR0GRFree(&pReq->header);
3680 return VERR_RESOURCE_BUSY;
3681 }
3682
3683 RTSpinlockRelease(pDevExt->EventSpinlock);
3684 if (pReq)
3685 VbglR0GRFree(&pReq->header);
3686
3687 /*
3688 * If we added a capability, check if that means some other thread in our
3689 * session should be unblocked because there are events pending.
3690 *
3691 * HACK ALERT! When the seamless support capability is added we generate a
3692 * seamless change event so that the ring-3 client can sync with
3693 * the seamless state. Although this introduces a spurious
3694 * wakeups of the ring-3 client, it solves the problem of client
3695 * state inconsistency in multiuser environment (on Windows).
3696 */
3697 if (fSessionAddedCaps)
3698 {
3699 uint32_t fGenFakeEvents = 0;
3700 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3701 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3702
3703 RTSpinlockAcquire(pDevExt->EventSpinlock);
3704 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3705 vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
3706 RTSpinlockRelease(pDevExt->EventSpinlock);
3707
3708#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3709 VGDrvCommonWaitDoWakeUps(pDevExt);
3710#endif
3711 }
3712
3713 return VINF_SUCCESS;
3714}
3715
3716
3717/**
3718 * Handle VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES.
3719 *
3720 * @returns VBox status code.
3721 *
3722 * @param pDevExt The device extension.
3723 * @param pSession The session.
3724 * @param pAcquire The request.
3725 */
3726static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCACQUIREGUESTCAPS pAcquire)
3727{
3728 int rc;
3729 LogFlow(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES: or=%#x not=%#x flags=%#x\n",
3730 pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask, pAcquire->u.In.fFlags));
3731
3732 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask,
3733 pAcquire->u.In.fFlags, false /*fSessionTermination*/);
3734 if (RT_FAILURE(rc))
3735 LogRel(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES failed rc=%Rrc\n", rc));
3736 return rc;
3737}
3738
3739
/**
 * Sets the guest capabilities for a session.
 *
 * Pre-allocates the VMMDev request outside the spinlock, applies the or/not
 * masks to the session's capability set, updates the global usage tracker,
 * and pushes the aggregated capability mask to the host when the global view
 * changed.  On host failure the session-local change is rolled back (unless
 * we're tearing the session down anyway).
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   fOrMask         The capabilities to add.
 * @param   fNotMask        The capabilities to remove.
 * @param   pfSessionCaps   Where to return the guest capabilities reported
 *                          for this session.  Optional.
 * @param   pfGlobalCaps    Where to return the guest capabilities reported
 *                          for all the sessions.  Optional.
 *
 * @param   fSessionTermination     Set if we're called by the session cleanup code.
 *                                  This tweaks the error handling so we perform
 *                                  proper session cleanup even if the host
 *                                  misbehaves.
 *
 * @remarks Takes the session spinlock.
 */
static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                       uint32_t fOrMask, uint32_t fNotMask, uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps,
                                       bool fSessionTermination)
{
    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    VMMDevReqGuestCapabilities2 *pReq;
    int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        /* Signal "unknown" to the caller via UINT32_MAX before bailing out. */
        if (pfSessionCaps)
            *pfSessionCaps = UINT32_MAX;
        if (pfGlobalCaps)
            *pfGlobalCaps = UINT32_MAX;
        LogRel(("vgdrvSetSessionCapabilities: VbglR0GRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    /*
     * Capabilities in "acquire" mode cannot be set via this API.
     * (Acquire mode is only used on windows at the time of writing.)
     */
    if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
#endif
    {
        /*
         * Apply the changes to the session mask.
         */
        uint32_t fChanged;
        uint32_t fPrevious = pSession->fCapabilities;
        pSession->fCapabilities |= fOrMask;
        pSession->fCapabilities &= ~fNotMask;

        /*
         * If anything actually changed, update the global usage counters.
         */
        fChanged = fPrevious ^ pSession->fCapabilities;
        if (fChanged)
        {
            bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
                                                            pDevExt->cSessions, "SetGuestCapsTracker");

            /*
             * If there are global changes, update the capabilities on the host.
             * (fGuestCapsHost == UINT32_MAX means the host state is unknown and
             * must be resynced regardless of whether the tracker changed.)
             */
            if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
            {
                Assert(pReq || fSessionTermination);
                if (pReq)
                {
                    rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);

                    /* On failure, roll back (unless it's session termination time). */
                    if (RT_FAILURE(rc) && !fSessionTermination)
                    {
                        /* Undo the tracker update first (passing the new mask as
                           'previous' reverses the change), then the session mask. */
                        vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
                                                   pDevExt->cSessions, "SetGuestCapsTracker");
                        pSession->fCapabilities = fPrevious;
                    }
                }
            }
        }
    }
#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    else
        rc = VERR_RESOURCE_BUSY;
#endif

    if (pfSessionCaps)
        *pfSessionCaps = pSession->fCapabilities;
    if (pfGlobalCaps)
        *pfGlobalCaps = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglR0GRFree(&pReq->header);
    return rc;
}
3847
3848
3849/**
3850 * Handle VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES.
3851 *
3852 * @returns VBox status code.
3853 *
3854 * @param pDevExt The device extension.
3855 * @param pSession The session.
3856 * @param pInfo The request.
3857 */
3858static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCSETGUESTCAPS pInfo)
3859{
3860 int rc;
3861 LogFlow(("VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3862
3863 if (!((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3864 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask,
3865 &pInfo->u.Out.fSessionCaps, &pInfo->u.Out.fGlobalCaps, false /*fSessionTermination*/);
3866 else
3867 rc = VERR_INVALID_PARAMETER;
3868
3869 return rc;
3870}
3871
3872/** @} */
3873
3874
3875/**
3876 * Common IOCtl for user to kernel and kernel to kernel communication.
3877 *
3878 * This function only does the basic validation and then invokes
3879 * worker functions that takes care of each specific function.
3880 *
3881 * @returns VBox status code.
3882 *
3883 * @param iFunction The requested function.
3884 * @param pDevExt The device extension.
3885 * @param pSession The client session.
3886 * @param pReqHdr Pointer to the request. This always starts with
3887 * a request common header.
3888 * @param cbReq The max size of the request buffer.
3889 */
3890int VGDrvCommonIoCtl(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLREQHDR pReqHdr, size_t cbReq)
3891{
3892 uintptr_t const iFunctionStripped = VBGL_IOCTL_CODE_STRIPPED(iFunction);
3893 int rc;
3894
3895 LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pReqHdr=%p cbReq=%zu\n",
3896 iFunction, pDevExt, pSession, pReqHdr, cbReq));
3897
3898 /*
3899 * Define some helper macros to simplify validation.
3900 */
3901#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
3902 do { \
3903 if (RT_LIKELY( pReqHdr->cbIn == (cbInExpect) \
3904 && ( pReqHdr->cbOut == (cbOutExpect) \
3905 || ((cbInExpect) == (cbOutExpect) && pReqHdr->cbOut == 0) ) )) \
3906 { /* likely */ } \
3907 else \
3908 { \
3909 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
3910 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
3911 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3912 } \
3913 } while (0)
3914
3915#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
3916
3917#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
3918 do { \
3919 if (RT_LIKELY(pReqHdr->cbIn == (cbInExpect))) \
3920 { /* likely */ } \
3921 else \
3922 { \
3923 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
3924 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
3925 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3926 } \
3927 } while (0)
3928
3929#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
3930 do { \
3931 if (RT_LIKELY( pReqHdr->cbOut == (cbOutExpect) \
3932 || (pReqHdr->cbOut == 0 && pReqHdr->cbIn == (cbOutExpect)))) \
3933 { /* likely */ } \
3934 else \
3935 { \
3936 Log(( #Name ": Invalid input/output sizes. cbOut=%ld (%ld) expected %ld.\n", \
3937 (long)pReqHdr->cbOut, (long)pReqHdr->cbIn, (long)(cbOutExpect))); \
3938 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3939 } \
3940 } while (0)
3941
3942#define REQ_CHECK_EXPR(Name, expr) \
3943 do { \
3944 if (RT_LIKELY(!!(expr))) \
3945 { /* likely */ } \
3946 else \
3947 { \
3948 Log(( #Name ": %s\n", #expr)); \
3949 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3950 } \
3951 } while (0)
3952
3953#define REQ_CHECK_EXPR_FMT(expr, fmt) \
3954 do { \
3955 if (RT_LIKELY(!!(expr))) \
3956 { /* likely */ } \
3957 else \
3958 { \
3959 Log( fmt ); \
3960 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3961 } \
3962 } while (0)
3963
3964#define REQ_CHECK_RING0(mnemonic) \
3965 do { \
3966 if (pSession->R0Process != NIL_RTR0PROCESS) \
3967 { \
3968 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
3969 pSession->Process, (uintptr_t)pSession->R0Process)); \
3970 return pReqHdr->rc = VERR_PERMISSION_DENIED; \
3971 } \
3972 } while (0)
3973
3974
3975 /*
3976 * Validate the request.
3977 */
3978 if (RT_LIKELY(cbReq >= sizeof(*pReqHdr)))
3979 { /* likely */ }
3980 else
3981 {
3982 Log(("VGDrvCommonIoCtl: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
3983 return VERR_INVALID_PARAMETER;
3984 }
3985
3986 if (pReqHdr->cbOut == 0)
3987 pReqHdr->cbOut = pReqHdr->cbIn;
3988
3989 if (RT_LIKELY( pReqHdr->uVersion == VBGLREQHDR_VERSION
3990 && pReqHdr->cbIn >= sizeof(*pReqHdr)
3991 && pReqHdr->cbIn <= cbReq
3992 && pReqHdr->cbOut >= sizeof(*pReqHdr)
3993 && pReqHdr->cbOut <= cbReq))
3994 { /* likely */ }
3995 else
3996 {
3997 Log(("VGDrvCommonIoCtl: Bad ioctl request header; cbIn=%#lx cbOut=%#lx version=%#lx\n",
3998 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->uVersion));
3999 return VERR_INVALID_PARAMETER;
4000 }
4001
4002 if (RT_LIKELY(RT_VALID_PTR(pSession)))
4003 { /* likely */ }
4004 else
4005 {
4006 Log(("VGDrvCommonIoCtl: Invalid pSession value %p (ioctl=%#x)\n", pSession, iFunction));
4007 return VERR_INVALID_PARAMETER;
4008 }
4009
4010
4011 /*
4012 * Deal with variably sized requests first.
4013 */
4014 rc = VINF_SUCCESS;
4015 if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST(0))
4016 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST_BIG) )
4017 {
4018 REQ_CHECK_EXPR(VBGL_IOCTL_VMMDEV_REQUEST, pReqHdr->uType != VBGLREQHDR_TYPE_DEFAULT);
4019 REQ_CHECK_EXPR_FMT(pReqHdr->cbIn == pReqHdr->cbOut,
4020 ("VBGL_IOCTL_VMMDEV_REQUEST: cbIn=%ld != cbOut=%ld\n", (long)pReqHdr->cbIn, (long)pReqHdr->cbOut));
4021 pReqHdr->rc = vgdrvIoCtl_VMMDevRequest(pDevExt, pSession, (VMMDevRequestHeader *)pReqHdr, cbReq);
4022 }
4023 else if (RT_LIKELY(pReqHdr->uType == VBGLREQHDR_TYPE_DEFAULT))
4024 {
4025 if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_LOG(0)))
4026 {
4027 REQ_CHECK_SIZE_OUT(VBGL_IOCTL_LOG, VBGL_IOCTL_LOG_SIZE_OUT);
4028 pReqHdr->rc = vgdrvIoCtl_Log(pDevExt, &((PVBGLIOCLOG)pReqHdr)->u.In.szMsg[0], pReqHdr->cbIn - sizeof(VBGLREQHDR),
4029 pSession->fUserSession);
4030 }
4031#ifdef VBOX_WITH_HGCM
4032 else if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL(0))
4033# if ARCH_BITS == 64
4034 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0))
4035# endif
4036 )
4037 {
4038 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
4039 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
4040 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
4041 iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0)),
4042 false /*fUserData*/, cbReq);
4043 }
4044 else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA(0)))
4045 {
4046 REQ_CHECK_RING0("VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA");
4047 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
4048 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
4049 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
4050 ARCH_BITS == 32, true /*fUserData*/, cbReq);
4051 }
4052#endif /* VBOX_WITH_HGCM */
4053 else
4054 {
4055 switch (iFunction)
4056 {
4057 /*
4058 * Ring-0 only:
4059 */
4060 case VBGL_IOCTL_IDC_CONNECT:
4061 REQ_CHECK_RING0("VBGL_IOCL_IDC_CONNECT");
4062 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_CONNECT);
4063 pReqHdr->rc = vgdrvIoCtl_IdcConnect(pDevExt, pSession, (PVBGLIOCIDCCONNECT)pReqHdr);
4064 break;
4065
4066 case VBGL_IOCTL_IDC_DISCONNECT:
4067 REQ_CHECK_RING0("VBGL_IOCTL_IDC_DISCONNECT");
4068 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_DISCONNECT);
4069 pReqHdr->rc = vgdrvIoCtl_IdcDisconnect(pDevExt, pSession, (PVBGLIOCIDCDISCONNECT)pReqHdr);
4070 break;
4071
4072 case VBGL_IOCTL_GET_VMMDEV_IO_INFO:
4073 REQ_CHECK_RING0("GET_VMMDEV_IO_INFO");
4074 REQ_CHECK_SIZES(VBGL_IOCTL_GET_VMMDEV_IO_INFO);
4075 pReqHdr->rc = vgdrvIoCtl_GetVMMDevIoInfo(pDevExt, (PVBGLIOCGETVMMDEVIOINFO)pReqHdr);
4076 break;
4077
4078 case VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
4079 REQ_CHECK_RING0("SET_MOUSE_NOTIFY_CALLBACK");
4080 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK);
4081 pReqHdr->rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (PVBGLIOCSETMOUSENOTIFYCALLBACK)pReqHdr);
4082 break;
4083
4084 /*
4085 * Ring-3 only:
4086 */
4087 case VBGL_IOCTL_DRIVER_VERSION_INFO:
4088 REQ_CHECK_SIZES(VBGL_IOCTL_DRIVER_VERSION_INFO);
4089 pReqHdr->rc = vgdrvIoCtl_DriverVersionInfo(pDevExt, pSession, (PVBGLIOCDRIVERVERSIONINFO)pReqHdr);
4090 break;
4091
4092 /*
4093 * Both ring-3 and ring-0:
4094 */
4095 case VBGL_IOCTL_WAIT_FOR_EVENTS:
4096 REQ_CHECK_SIZES(VBGL_IOCTL_WAIT_FOR_EVENTS);
4097 pReqHdr->rc = vgdrvIoCtl_WaitForEvents(pDevExt, pSession, (VBGLIOCWAITFOREVENTS *)pReqHdr,
4098 pSession->R0Process != NIL_RTR0PROCESS);
4099 break;
4100
4101 case VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
4102 REQ_CHECK_SIZES(VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS);
4103 pReqHdr->rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
4104 break;
4105
4106 case VBGL_IOCTL_CHANGE_FILTER_MASK:
4107 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK);
4108 pReqHdr->rc = vgdrvIoCtl_ChangeFilterMask(pDevExt, pSession, (PVBGLIOCCHANGEFILTERMASK)pReqHdr);
4109 break;
4110
4111#ifdef VBOX_WITH_HGCM
4112 case VBGL_IOCTL_HGCM_CONNECT:
4113 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_CONNECT);
4114 pReqHdr->rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (PVBGLIOCHGCMCONNECT)pReqHdr);
4115 break;
4116
4117 case VBGL_IOCTL_HGCM_DISCONNECT:
4118 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_DISCONNECT);
4119 pReqHdr->rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (PVBGLIOCHGCMDISCONNECT)pReqHdr);
4120 break;
4121#endif
4122
4123 case VBGL_IOCTL_CHECK_BALLOON:
4124 REQ_CHECK_SIZES(VBGL_IOCTL_CHECK_BALLOON);
4125 pReqHdr->rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHECKBALLOON)pReqHdr);
4126 break;
4127
4128 case VBGL_IOCTL_CHANGE_BALLOON:
4129 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_BALLOON);
4130 pReqHdr->rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHANGEBALLOON)pReqHdr);
4131 break;
4132
4133 case VBGL_IOCTL_WRITE_CORE_DUMP:
4134 REQ_CHECK_SIZES(VBGL_IOCTL_WRITE_CORE_DUMP);
4135 pReqHdr->rc = vgdrvIoCtl_WriteCoreDump(pDevExt, (PVBGLIOCWRITECOREDUMP)pReqHdr);
4136 break;
4137
4138 case VBGL_IOCTL_SET_MOUSE_STATUS:
4139 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_STATUS);
4140 pReqHdr->rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, ((PVBGLIOCSETMOUSESTATUS)pReqHdr)->u.In.fStatus);
4141 break;
4142
4143 case VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
4144 REQ_CHECK_SIZES(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES);
4145 pReqHdr->rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (PVBGLIOCACQUIREGUESTCAPS)pReqHdr);
4146 break;
4147
4148 case VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES:
4149 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES);
4150 pReqHdr->rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (PVBGLIOCSETGUESTCAPS)pReqHdr);
4151 break;
4152
4153#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
4154 case VBGL_IOCTL_DPC_LATENCY_CHECKER:
4155 REQ_CHECK_SIZES(VBGL_IOCTL_DPC_LATENCY_CHECKER);
4156 pReqHdr->rc = VGDrvNtIOCtl_DpcLatencyChecker();
4157 break;
4158#endif
4159
4160 default:
4161 {
4162 LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x (stripped %#x) cbReq=%#x\n",
4163 iFunction, iFunctionStripped, cbReq));
4164 pReqHdr->rc = rc = VERR_NOT_SUPPORTED;
4165 break;
4166 }
4167 }
4168 }
4169 }
4170 else
4171 {
4172 Log(("VGDrvCommonIoCtl: uType=%#x, expected default (ioctl=%#x)\n", pReqHdr->uType, iFunction));
4173 return VERR_INVALID_PARAMETER;
4174 }
4175
4176 LogFlow(("VGDrvCommonIoCtl: returns %Rrc (req: rc=%Rrc cbOut=%#x)\n", rc, pReqHdr->rc, pReqHdr->cbOut));
4177 return rc;
4178}
4179
4180
4181/**
4182 * Used by VGDrvCommonISR as well as the acquire guest capability code.
4183 *
4184 * @returns VINF_SUCCESS on success. On failure, ORed together
4185 * RTSemEventMultiSignal errors (completes processing despite errors).
4186 * @param pDevExt The VBoxGuest device extension.
4187 * @param fEvents The events to dispatch.
4188 */
4189static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
4190{
4191 PVBOXGUESTWAIT pWait;
4192 PVBOXGUESTWAIT pSafe;
4193 int rc = VINF_SUCCESS;
4194
4195 fEvents |= pDevExt->f32PendingEvents;
4196
4197 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
4198 {
4199 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
4200 if ( fHandledEvents != 0
4201 && !pWait->fResEvents)
4202 {
4203 /* Does this one wait on any of the events we're dispatching? We do a quick
4204 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
4205 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
4206 fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
4207 if (fHandledEvents)
4208 {
4209 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
4210 fEvents &= ~pWait->fResEvents;
4211 RTListNodeRemove(&pWait->ListNode);
4212#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
4213 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
4214#else
4215 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
4216 rc |= RTSemEventMultiSignal(pWait->Event);
4217#endif
4218 if (!fEvents)
4219 break;
4220 }
4221 }
4222 }
4223
4224 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
4225 return rc;
4226}
4227
4228
4229/**
4230 * Simply checks whether the IRQ is ours or not, does not do any interrupt
4231 * procesing.
4232 *
4233 * @returns true if it was our interrupt, false if it wasn't.
4234 * @param pDevExt The VBoxGuest device extension.
4235 */
4236bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
4237{
4238 VMMDevMemory volatile *pVMMDevMemory;
4239 bool fOurIrq;
4240
4241 RTSpinlockAcquire(pDevExt->EventSpinlock);
4242 pVMMDevMemory = pDevExt->pVMMDevMemory;
4243 fOurIrq = pVMMDevMemory ? pVMMDevMemory->V.V1_04.fHaveEvents : false;
4244 RTSpinlockRelease(pDevExt->EventSpinlock);
4245
4246 return fOurIrq;
4247}
4248
4249
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    bool fMousePositionChanged = false;
    int rc = 0;
    VMMDevMemory volatile *pVMMDevMemory;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     * (pVMMDevMemory may also still be NULL during early init; treat that as
     * "not ours" rather than dereferencing it.)
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pVMMDevMemory = pDevExt->pVMMDevMemory;
    fOurIrq = pVMMDevMemory ? pVMMDevMemory->V.V1_04.fHaveEvents : false;
    if (fOurIrq)
    {
        /*
         * Acknowlegde events.
         * We don't use VbglR0GRPerform here as it may take another spinlocks.
         * The request/response is a pre-allocated buffer whose physical address
         * is written to the VMMDev request port; the host fills it in place.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events    = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;

            Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#if !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
                /* Safe to invoke the callback while holding the spinlock on
                   these platforms; otherwise it's deferred until below. */
                if (pDevExt->pfnMouseNotifyCallback)
                    pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                PVBOXGUESTWAIT pWait;
                PVBOXGUESTWAIT pSafe;
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    /* Only waiters whose HGCM request the host marked done are woken. */
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        Log3(("VGDrvCommonISR: not ours\n"));

    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Execute the mouse notification callback here if it cannot be executed while
     * holding the interrupt safe spinlock, see @bugref{8639}.
     */
#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT) && !defined(RT_OS_WINDOWS) /* (Windows does this in the Dpc callback) */
    if (   fMousePositionChanged
        && pDevExt->pfnMouseNotifyCallback)
        pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
#endif

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     *       care of it.  Same on darwin, doing it in the work loop callback.
     */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VGDrvNativeISRMousePollEvent(pDevExt);
    }

    /* rc only aggregates semaphore-signal errors; nothing useful to do with
       them in an ISR beyond asserting in debug builds. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
4381
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette