VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@75547

Last change on this file since 75547 was 75547, checked in by vboxsync, 6 years ago

VBoxGuest: Added an IDC interface for faster HGCM calls (VBGL_IOCTL_IDC_HGCM_FAST_CALL, VbglR0HGCMFastCall). This expects the caller to construct a valid call and only concerns itself with issuing it to the host and waiting for the result to arrive. bugref:9172

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 162.4 KB
1/* $Id: VBoxGuest.cpp 75547 2018-11-18 04:50:34Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/** @page pg_vbdrv VBoxGuest
28 *
29 * VBoxGuest is the device driver for VMMDev.
30 *
31 * The device driver is shipped as part of the guest additions. It has roots in
32 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
33 * specific code may apply to both drivers.
34 *
35 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
36 * The VBoxGuest.cpp source file shall not contain platform specific code,
37 * though it must occasionally do a few \#ifdef RT_OS_XXX tests to cater for
38 * platform differences. Though, in those cases, it is common that more than
39 * one platform needs special handling.
40 *
41 * On most platforms the device driver should create two device nodes, one for
42 * full (unrestricted) access to the feature set, and one which only provides a
43 * restricted set of functions. These are generally referred to as 'vboxguest'
44 * and 'vboxuser' respectively. Currently, this two device approach is only
45 * implemented on Linux!
46 *
47 */
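/* A minimal ring-3 sketch of the two-node scheme described above (Linux only,
 * assuming the default node names /dev/vboxguest and /dev/vboxuser; other
 * platforms expose the driver differently):
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/dev/vboxuser", O_RDWR);     // restricted feature set
 *      if (fd < 0)
 *          fd = open("/dev/vboxguest", O_RDWR);    // full (unrestricted) feature set
 *      // ... issue VBGL_IOCTL_XXX requests on fd ...
 *      close(fd);
 */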
48
49
50/*********************************************************************************************************************************
51* Header Files *
52*********************************************************************************************************************************/
53#define LOG_GROUP LOG_GROUP_DEFAULT
54#include "VBoxGuestInternal.h"
55#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
56#include <VBox/log.h>
57#include <VBox/HostServices/GuestPropertySvc.h>
58#include <iprt/ctype.h>
59#include <iprt/mem.h>
60#include <iprt/time.h>
61#include <iprt/memobj.h>
62#include <iprt/asm.h>
63#include <iprt/asm-amd64-x86.h>
64#include <iprt/string.h>
65#include <iprt/process.h>
66#include <iprt/assert.h>
67#include <iprt/param.h>
68#include <iprt/timer.h>
69#ifdef VBOX_WITH_HGCM
70# include <iprt/thread.h>
71#endif
72#include "version-generated.h"
73#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
74# include "revision-generated.h"
75#endif
76#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
77# include <iprt/rand.h>
78#endif
79
80
81/*********************************************************************************************************************************
82* Defined Constants And Macros *
83*********************************************************************************************************************************/
84#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
85
86
87/*********************************************************************************************************************************
88* Internal Functions *
89*********************************************************************************************************************************/
90#ifdef VBOX_WITH_HGCM
91static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
92#endif
93static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
94static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
95static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
96static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
97static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
98static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
99static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
100 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
101static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
102 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
103static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
104 uint32_t fOrMask, uint32_t fNoMask,
105 uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps, bool fSessionTermination);
106static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
107 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags, bool fSessionTermination);
108static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
109
110
111/*********************************************************************************************************************************
112* Global Variables *
113*********************************************************************************************************************************/
114static const uint32_t g_cbChangeMemBalloonReq = RT_UOFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
115
116#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
117/**
118 * Drag in the rest of IPRT since we share it with the
119 * rest of the kernel modules on Solaris.
120 */
121PFNRT g_apfnVBoxGuestIPRTDeps[] =
122{
123 /* VirtioNet */
124 (PFNRT)RTRandBytes,
125 /* RTSemMutex* */
126 (PFNRT)RTSemMutexCreate,
127 (PFNRT)RTSemMutexDestroy,
128 (PFNRT)RTSemMutexRequest,
129 (PFNRT)RTSemMutexRequestNoResume,
130 (PFNRT)RTSemMutexRequestDebug,
131 (PFNRT)RTSemMutexRequestNoResumeDebug,
132 (PFNRT)RTSemMutexRelease,
133 (PFNRT)RTSemMutexIsOwned,
134 NULL
135};
136#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
137
138
139/**
140 * Reserves memory in which the VMM can relocate any guest mappings
141 * that are floating around.
142 *
143 * This operation is a little bit tricky since the VMM might not accept
144 * just any address because of address clashes between the three contexts
145 * it operates in, so use a small stack to perform this operation.
146 *
147 * @returns VBox status code (ignored).
148 * @param pDevExt The device extension.
149 */
150static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
151{
152 /*
153 * Query the required space.
154 */
155 VMMDevReqHypervisorInfo *pReq;
156 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
157 if (RT_FAILURE(rc))
158 return rc;
159 pReq->hypervisorStart = 0;
160 pReq->hypervisorSize = 0;
161 rc = VbglR0GRPerform(&pReq->header);
162 if (RT_FAILURE(rc)) /* this shouldn't happen! */
163 {
164 VbglR0GRFree(&pReq->header);
165 return rc;
166 }
167
168 /*
169 * The VMM will report back if there is nothing it wants to map, like for
170 * instance in VT-x and AMD-V mode.
171 */
172 if (pReq->hypervisorSize == 0)
173 Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
174 else
175 {
176 /*
177 * We have to try several times since the host can be picky
178 * about certain addresses.
179 */
180 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
181 uint32_t cbHypervisor = pReq->hypervisorSize;
182 RTR0MEMOBJ ahTries[5];
183 uint32_t iTry;
184 bool fBitched = false;
185 Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
186 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
187 {
188 /*
189 * Reserve space, or if that isn't supported, create an object for
190 * some fictive physical memory and map that in to kernel space.
191 *
192 * To make the code a bit uglier, most systems cannot help with
193 * 4MB alignment, so we have to deal with that in addition to
194 * having two ways of getting the memory.
195 */
196 uint32_t uAlignment = _4M;
197 RTR0MEMOBJ hObj;
198 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
199 if (rc == VERR_NOT_SUPPORTED)
200 {
201 uAlignment = PAGE_SIZE;
202 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
203 }
204 /*
205 * If both RTR0MemObjReserveKernel calls above failed because reservation is either
206 * not supported or not implemented at all on the current platform, try to map the
207 * memory object into the virtual kernel space instead.
208 */
209 if (rc == VERR_NOT_SUPPORTED)
210 {
211 if (hFictive == NIL_RTR0MEMOBJ)
212 {
213 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
214 if (RT_FAILURE(rc))
215 break;
216 hFictive = hObj;
217 }
218 uAlignment = _4M;
219 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
220 if (rc == VERR_NOT_SUPPORTED)
221 {
222 uAlignment = PAGE_SIZE;
223 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
224 }
225 }
226 if (RT_FAILURE(rc))
227 {
228 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
229 rc, cbHypervisor, uAlignment, iTry));
230 fBitched = true;
231 break;
232 }
233
234 /*
235 * Try set it.
236 */
237 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
238 pReq->header.rc = VERR_INTERNAL_ERROR;
239 pReq->hypervisorSize = cbHypervisor;
240 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
241 if ( uAlignment == PAGE_SIZE
242 && pReq->hypervisorStart & (_4M - 1))
243 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
244 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
245
246 rc = VbglR0GRPerform(&pReq->header);
247 if (RT_SUCCESS(rc))
248 {
249 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
250 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
251 RTR0MemObjAddress(pDevExt->hGuestMappings),
252 RTR0MemObjSize(pDevExt->hGuestMappings),
253 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
254 break;
255 }
256 ahTries[iTry] = hObj;
257 }
258
259 /*
260 * Cleanup failed attempts.
261 */
262 while (iTry-- > 0)
263 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
264 if ( RT_FAILURE(rc)
265 && hFictive != NIL_RTR0PTR)
266 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
267 if (RT_FAILURE(rc) && !fBitched)
268 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
269 }
270 VbglR0GRFree(&pReq->header);
271
272 /*
273 * We ignore failed attempts for now.
274 */
275 return VINF_SUCCESS;
276}
277
278
279/**
280 * Undo what vgdrvInitFixateGuestMappings did.
281 *
282 * @param pDevExt The device extension.
283 */
284static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
285{
286 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
287 {
288 /*
289 * Tell the host that we're going to free the memory we reserved for
290 * it, then free it up. (Leak the memory if anything goes wrong here.)
291 */
292 VMMDevReqHypervisorInfo *pReq;
293 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
294 if (RT_SUCCESS(rc))
295 {
296 pReq->hypervisorStart = 0;
297 pReq->hypervisorSize = 0;
298 rc = VbglR0GRPerform(&pReq->header);
299 VbglR0GRFree(&pReq->header);
300 }
301 if (RT_SUCCESS(rc))
302 {
303 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
304 AssertRC(rc);
305 }
306 else
307 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
308
309 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
310 }
311}
312
313
314
315/**
316 * Report the guest information to the host.
317 *
318 * @returns IPRT status code.
319 * @param enmOSType The OS type to report.
320 */
321static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
322{
323 /*
324 * Allocate and fill in the two guest info reports.
325 */
326 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
327 VMMDevReportGuestInfo *pReqInfo1 = NULL;
328 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
329 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
330 if (RT_SUCCESS(rc))
331 {
332 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
333 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
334 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
335 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
336 pReqInfo2->guestInfo.additionsFeatures = VBOXGSTINFO2_F_REQUESTOR_INFO;
337 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
338
339 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
340 Log(("vgdrvReportGuestInfo: VbglR0GRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
341 if (RT_SUCCESS(rc))
342 {
343 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
344 pReqInfo1->guestInfo.osType = enmOSType;
345
346 /*
347 * There are two protocols here:
348 * 1. Info2 + Info1. Supported by >=3.2.51.
349 * 2. Info1 and optionally Info2. The old protocol.
350 *
351 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
352 * if not supported by the VMMDev (message ordering requirement).
353 */
354 rc = VbglR0GRPerform(&pReqInfo2->header);
355 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
356 if (RT_SUCCESS(rc))
357 {
358 rc = VbglR0GRPerform(&pReqInfo1->header);
359 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
360 }
361 else if ( rc == VERR_NOT_SUPPORTED
362 || rc == VERR_NOT_IMPLEMENTED)
363 {
364 rc = VbglR0GRPerform(&pReqInfo1->header);
365 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
366 if (RT_SUCCESS(rc))
367 {
368 rc = VbglR0GRPerform(&pReqInfo2->header);
369 Log(("vgdrvReportGuestInfo: VbglR0GRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
370 if (rc == VERR_NOT_IMPLEMENTED)
371 rc = VINF_SUCCESS;
372 }
373 }
374 VbglR0GRFree(&pReqInfo1->header);
375 }
376 VbglR0GRFree(&pReqInfo2->header);
377 }
378
379 return rc;
380}
381
382
383/**
384 * Report the guest driver status to the host.
385 *
386 * @returns IPRT status code.
387 * @param fActive Flag whether the driver is now active or not.
388 */
389static int vgdrvReportDriverStatus(bool fActive)
390{
391 /*
392 * Report guest status of the VBox driver to the host.
393 */
394 VMMDevReportGuestStatus *pReq2 = NULL;
395 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
396 Log(("vgdrvReportDriverStatus: VbglR0GRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
397 if (RT_SUCCESS(rc))
398 {
399 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
400 pReq2->guestStatus.status = fActive ?
401 VBoxGuestFacilityStatus_Active
402 : VBoxGuestFacilityStatus_Inactive;
403 pReq2->guestStatus.flags = 0;
404 rc = VbglR0GRPerform(&pReq2->header);
405 Log(("vgdrvReportDriverStatus: VbglR0GRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
406 fActive ? 1 : 0, rc));
407 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
408 rc = VINF_SUCCESS;
409 VbglR0GRFree(&pReq2->header);
410 }
411
412 return rc;
413}
414
415
416/** @name Memory Ballooning
417 * @{
418 */
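/* Note on granularity: the balloon is managed in fixed-size chunks.  With the
 * usual VMMDev.h values a chunk is 1MB, i.e. VMMDEV_MEMORY_BALLOON_CHUNK_PAGES
 * (256) pages of 4KB each; g_cbChangeMemBalloonReq above is sized accordingly. */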
419
420/**
421 * Inflate the balloon by one chunk represented by an R0 memory object.
422 *
423 * The caller owns the balloon mutex.
424 *
425 * @returns IPRT status code.
426 * @param pMemObj Pointer to the R0 memory object.
427 * @param pReq The pre-allocated request for performing the VMMDev call.
428 */
429static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
430{
431 uint32_t iPage;
432 int rc;
433
434 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
435 {
436 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
437 pReq->aPhysPage[iPage] = phys;
438 }
439
440 pReq->fInflate = true;
441 pReq->header.size = g_cbChangeMemBalloonReq;
442 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
443
444 rc = VbglR0GRPerform(&pReq->header);
445 if (RT_FAILURE(rc))
446 LogRel(("vgdrvBalloonInflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
447 return rc;
448}
449
450
451/**
452 * Deflate the balloon by one chunk - inform the host and free the memory object.
453 *
454 * The caller owns the balloon mutex.
455 *
456 * @returns IPRT status code.
457 * @param pMemObj Pointer to the R0 memory object.
458 * The memory object will be freed afterwards.
459 * @param pReq The pre-allocated request for performing the VMMDev call.
460 */
461static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
462{
463 uint32_t iPage;
464 int rc;
465
466 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
467 {
468 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
469 pReq->aPhysPage[iPage] = phys;
470 }
471
472 pReq->fInflate = false;
473 pReq->header.size = g_cbChangeMemBalloonReq;
474 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
475
476 rc = VbglR0GRPerform(&pReq->header);
477 if (RT_FAILURE(rc))
478 {
479 LogRel(("vgdrvBalloonDeflate: VbglR0GRPerform failed. rc=%Rrc\n", rc));
480 return rc;
481 }
482
483 rc = RTR0MemObjFree(*pMemObj, true);
484 if (RT_FAILURE(rc))
485 {
486 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
487 return rc;
488 }
489
490 *pMemObj = NIL_RTR0MEMOBJ;
491 return VINF_SUCCESS;
492}
493
494
495/**
496 * Inflate/deflate the memory balloon and notify the host.
497 *
498 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
499 *
500 * @returns VBox status code.
501 * @param pDevExt The device extension.
502 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
503 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
504 * (VINF_SUCCESS if set).
505 */
506static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, bool *pfHandleInR3)
507{
508 int rc = VINF_SUCCESS;
509
510 if (pDevExt->MemBalloon.fUseKernelAPI)
511 {
512 VMMDevChangeMemBalloon *pReq;
513 uint32_t i;
514
515 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
516 {
517 LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
518 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
519 return VERR_INVALID_PARAMETER;
520 }
521
522 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
523 return VINF_SUCCESS; /* nothing to do */
524
525 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
526 && !pDevExt->MemBalloon.paMemObj)
527 {
528 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
529 if (!pDevExt->MemBalloon.paMemObj)
530 {
531 LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
532 return VERR_NO_MEMORY;
533 }
534 }
535
536 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
537 if (RT_FAILURE(rc))
538 return rc;
539
540 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
541 {
542 /* inflate */
543 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
544 {
545 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
546 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
547 if (RT_FAILURE(rc))
548 {
549 if (rc == VERR_NOT_SUPPORTED)
550 {
551 /* not supported -- fall back to the R3-allocated memory. */
552 rc = VINF_SUCCESS;
553 pDevExt->MemBalloon.fUseKernelAPI = false;
554 Assert(pDevExt->MemBalloon.cChunks == 0);
555 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
556 }
557 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
558 * cannot allocate more memory => don't try further, just stop here */
559 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
560 break;
561 }
562
563 rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
564 if (RT_FAILURE(rc))
565 {
566 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
567 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
568 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
569 break;
570 }
571 pDevExt->MemBalloon.cChunks++;
572 }
573 }
574 else
575 {
576 /* deflate */
577 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
578 {
579 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
580 if (RT_FAILURE(rc))
581 {
582 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
583 break;
584 }
585 pDevExt->MemBalloon.cChunks--;
586 }
587 }
588
589 VbglR0GRFree(&pReq->header);
590 }
591
592 /*
593 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
594 * the balloon changes via the other API.
595 */
596 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
597
598 return rc;
599}
600
601
602/**
603 * Inflate/deflate the balloon by one chunk.
604 *
605 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
606 *
607 * @returns VBox status code.
608 * @param pDevExt The device extension.
609 * @param pSession The session.
610 * @param pvChunk The address of the chunk to add to / remove from the
611 * balloon. (user space address)
612 * @param fInflate Inflate if true, deflate if false.
613 */
614static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, RTR3PTR pvChunk, bool fInflate)
615{
616 VMMDevChangeMemBalloon *pReq;
617 PRTR0MEMOBJ pMemObj = NULL;
618 int rc = VINF_SUCCESS;
619 uint32_t i;
620 RT_NOREF1(pSession);
621
622 if (fInflate)
623 {
624 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
625 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
626 {
627 LogRel(("vgdrvSetBalloonSizeFromUser: cannot inflate balloon, already have %u chunks (max=%u)\n",
628 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
629 return VERR_INVALID_PARAMETER;
630 }
631
632 if (!pDevExt->MemBalloon.paMemObj)
633 {
634 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
635 if (!pDevExt->MemBalloon.paMemObj)
636 {
637 LogRel(("vgdrvSetBalloonSizeFromUser: no memory for paMemObj!\n"));
638 return VERR_NO_MEMORY;
639 }
640 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
641 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
642 }
643 }
644 else
645 {
646 if (pDevExt->MemBalloon.cChunks == 0)
647 {
648 AssertMsgFailed(("vgdrvSetBalloonSizeFromUser: cannot decrease balloon, already at size 0\n"));
649 return VERR_INVALID_PARAMETER;
650 }
651 }
652
653 /*
654 * Enumerate all memory objects and check if the object is already registered.
655 */
656 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
657 {
658 if ( fInflate
659 && !pMemObj
660 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
661 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
662 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == pvChunk)
663 {
664 if (fInflate)
665 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
666 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
667 break;
668 }
669 }
670 if (!pMemObj)
671 {
672 if (fInflate)
673 {
674 /* no free object pointer found -- should not happen */
675 return VERR_NO_MEMORY;
676 }
677
678 /* cannot free this memory as it wasn't provided before */
679 return VERR_NOT_FOUND;
680 }
681
682 /*
683 * Try inflate / deflate the balloon as requested.
684 */
685 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
686 if (RT_FAILURE(rc))
687 return rc;
688 pReq->header.fRequestor = pSession->fRequestor;
689
690 if (fInflate)
691 {
692 rc = RTR0MemObjLockUser(pMemObj, pvChunk, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
693 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
694 if (RT_SUCCESS(rc))
695 {
696 rc = vgdrvBalloonInflate(pMemObj, pReq);
697 if (RT_SUCCESS(rc))
698 pDevExt->MemBalloon.cChunks++;
699 else
700 {
701 Log(("vgdrvSetBalloonSizeFromUser(inflate): failed, rc=%Rrc!\n", rc));
702 RTR0MemObjFree(*pMemObj, true);
703 *pMemObj = NIL_RTR0MEMOBJ;
704 }
705 }
706 }
707 else
708 {
709 rc = vgdrvBalloonDeflate(pMemObj, pReq);
710 if (RT_SUCCESS(rc))
711 pDevExt->MemBalloon.cChunks--;
712 else
713 Log(("vgdrvSetBalloonSizeFromUser(deflate): failed, rc=%Rrc!\n", rc));
714 }
715
716 VbglR0GRFree(&pReq->header);
717 return rc;
718}
719
720
721/**
722 * Cleanup the memory balloon of a session.
723 *
724 * Will request the balloon mutex, so it must be valid and the caller must not
725 * own it already.
726 *
727 * @param pDevExt The device extension.
728 * @param pSession The session. Can be NULL at unload.
729 */
730static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
731{
732 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
733 if ( pDevExt->MemBalloon.pOwner == pSession
734 || pSession == NULL /*unload*/)
735 {
736 if (pDevExt->MemBalloon.paMemObj)
737 {
738 VMMDevChangeMemBalloon *pReq;
739 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
740 if (RT_SUCCESS(rc))
741 {
742 /* fRequestor is kernel here, as we're cleaning up. */
743
744 uint32_t i;
745 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
746 {
747 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
748 if (RT_FAILURE(rc))
749 {
750 LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
751 rc, pDevExt->MemBalloon.cChunks));
752 break;
753 }
754 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
755 pDevExt->MemBalloon.cChunks--;
756 }
757 VbglR0GRFree(&pReq->header);
758 }
759 else
760 LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
761 rc, pDevExt->MemBalloon.cChunks));
762 RTMemFree(pDevExt->MemBalloon.paMemObj);
763 pDevExt->MemBalloon.paMemObj = NULL;
764 }
765
766 pDevExt->MemBalloon.pOwner = NULL;
767 }
768 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
769}
770
771/** @} */
772
773
774
775/** @name Heartbeat
776 * @{
777 */
778
779/**
780 * Sends heartbeat to host.
781 *
782 * @returns VBox status code.
783 */
784static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
785{
786 int rc;
787 if (pDevExt->pReqGuestHeartbeat)
788 {
789 rc = VbglR0GRPerform(pDevExt->pReqGuestHeartbeat);
790 Log3(("vgdrvHeartbeatSend: VbglR0GRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
791 }
792 else
793 rc = VERR_INVALID_STATE;
794 return rc;
795}
796
797
798/**
799 * Callback for heartbeat timer.
800 */
801static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
802{
803 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
804 int rc;
805 AssertReturnVoid(pDevExt);
806
807 rc = vgdrvHeartbeatSend(pDevExt);
808 if (RT_FAILURE(rc))
809 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
810
811 NOREF(hTimer); NOREF(iTick);
812}
813
814
815/**
816 * Configure the host to check guest's heartbeat
817 * and get heartbeat interval from the host.
818 *
819 * @returns VBox status code.
820 * @param pDevExt The device extension.
821 * @param fEnabled Set true to enable guest heartbeat checks on host.
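 *
 * @note  The host replies with the interval it wants between heartbeats in
 *        VMMDevReqHeartbeat::cNsInterval; the value is cached in
 *        pDevExt->cNsHeartbeatInterval and used when creating the timer below.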
822 */
823static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
824{
825 VMMDevReqHeartbeat *pReq;
826 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
827 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
828 if (RT_SUCCESS(rc))
829 {
830 pReq->fEnabled = fEnabled;
831 pReq->cNsInterval = 0;
832 rc = VbglR0GRPerform(&pReq->header);
833 Log(("vgdrvHeartbeatHostConfigure: VbglR0GRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
834 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
835 VbglR0GRFree(&pReq->header);
836 }
837 return rc;
838}
839
840
841/**
842 * Initializes the heartbeat timer.
843 *
844 * This feature may be disabled by the host.
845 *
846 * @returns VBox status (ignored).
847 * @param pDevExt The device extension.
848 */
849static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
850{
851 /*
852 * Make sure that heartbeat checking is disabled.
853 */
854 int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
855 if (RT_SUCCESS(rc))
856 {
857 rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
858 if (RT_SUCCESS(rc))
859 {
860 /*
861 * Preallocate the request to use it from the timer callback because:
862 * 1) on Windows VbglR0GRAlloc must be called at IRQL <= APC_LEVEL
863 * and the timer callback runs at DISPATCH_LEVEL;
864 * 2) avoid repeated allocations.
865 */
866 rc = VbglR0GRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
867 if (RT_SUCCESS(rc))
868 {
869 LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
870 pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
871 rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
872 (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
873 if (RT_SUCCESS(rc))
874 {
875 rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
876 if (RT_SUCCESS(rc))
877 return VINF_SUCCESS;
878
879 LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
880 }
881 else
882 LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));
883
884 VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
885 pDevExt->pReqGuestHeartbeat = NULL;
886 }
887 else
888 LogRel(("vgdrvHeartbeatInit: VbglR0GRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));
889
890 LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
891 vgdrvHeartbeatHostConfigure(pDevExt, false);
892 }
893 else
894 LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
895 }
896 return rc;
897}
898
899/** @} */
900
901
902/**
903 * Helper to reinit the VMMDev communication after hibernation.
904 *
905 * @returns VBox status code.
906 * @param pDevExt The device extension.
907 * @param enmOSType The OS type.
908 *
909 * @todo Call this on all platforms, not just windows.
910 */
911int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
912{
913 int rc = vgdrvReportGuestInfo(enmOSType);
914 if (RT_SUCCESS(rc))
915 {
916 rc = vgdrvReportDriverStatus(true /* Driver is active */);
917 if (RT_FAILURE(rc))
918 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
919 }
920 else
921 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
922 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
923 RT_NOREF1(pDevExt);
924 return rc;
925}
926
927
928/**
929 * Initializes the release logger (debug is implicit), if configured.
930 *
931 * @returns IPRT status code.
932 */
933int VGDrvCommonInitLoggers(void)
934{
935#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
936 /*
937 * Create the release log.
938 */
939 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
940 PRTLOGGER pRelLogger;
941 int rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
942 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
943 if (RT_SUCCESS(rc))
944 RTLogRelSetDefaultInstance(pRelLogger);
945 /** @todo Add native hook for getting logger config parameters and setting
946 * them. On linux we should use the module parameter stuff... */
947 return rc;
948#else
949 return VINF_SUCCESS;
950#endif
951}
952
953
954/**
955 * Destroys the loggers.
956 */
957void VGDrvCommonDestroyLoggers(void)
958{
959#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
960 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
961 RTLogDestroy(RTLogSetDefaultInstance(NULL));
962#endif
963}
964
965
966/**
967 * Initialize the device extension fundament.
968 *
969 * There are no device resources at this point, VGDrvCommonInitDevExtResources
970 * should be called when they are available.
971 *
972 * @returns VBox status code.
973 * @param pDevExt The device extension to init.
974 */
975int VGDrvCommonInitDevExtFundament(PVBOXGUESTDEVEXT pDevExt)
976{
977 int rc;
978 AssertMsg( pDevExt->uInitState != VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT
979 && pDevExt->uInitState != VBOXGUESTDEVEXT_INIT_STATE_RESOURCES, ("uInitState=%#x\n", pDevExt->uInitState));
980
981 /*
982 * Initialize the data.
983 */
984 pDevExt->IOPortBase = UINT16_MAX;
985 pDevExt->pVMMDevMemory = NULL;
986 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
987 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
988 pDevExt->pIrqAckEvents = NULL;
989 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
990 RTListInit(&pDevExt->WaitList);
991#ifdef VBOX_WITH_HGCM
992 RTListInit(&pDevExt->HGCMWaitList);
993#endif
994#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
995 RTListInit(&pDevExt->WakeUpList);
996#endif
997 RTListInit(&pDevExt->WokenUpList);
998 RTListInit(&pDevExt->FreeList);
999 RTListInit(&pDevExt->SessionList);
1000 pDevExt->cSessions = 0;
1001 pDevExt->fLoggingEnabled = false;
1002 pDevExt->f32PendingEvents = 0;
1003 pDevExt->u32MousePosChangedSeq = 0;
1004 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
1005 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
1006 pDevExt->MemBalloon.cChunks = 0;
1007 pDevExt->MemBalloon.cMaxChunks = 0;
1008 pDevExt->MemBalloon.fUseKernelAPI = true;
1009 pDevExt->MemBalloon.paMemObj = NULL;
1010 pDevExt->MemBalloon.pOwner = NULL;
1011 pDevExt->pfnMouseNotifyCallback = NULL;
1012 pDevExt->pvMouseNotifyCallbackArg = NULL;
1013 pDevExt->pReqGuestHeartbeat = NULL;
1014
1015 pDevExt->fFixedEvents = 0;
1016 vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
1017 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
1018
1019 vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
1020 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
1021
1022 pDevExt->fAcquireModeGuestCaps = 0;
1023 pDevExt->fSetModeGuestCaps = 0;
1024 pDevExt->fAcquiredGuestCaps = 0;
1025 vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
1026 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
1027
1028 /*
1029 * Create the wait and session spinlocks as well as the ballooning mutex.
1030 */
1031 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1032 if (RT_SUCCESS(rc))
1033 {
1034 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1035 if (RT_SUCCESS(rc))
1036 {
1037 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1038 if (RT_SUCCESS(rc))
1039 {
1040 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT;
1041 return VINF_SUCCESS;
1042 }
1043
1044 LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1045 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1046 }
1047 else
1048 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1049 RTSpinlockDestroy(pDevExt->EventSpinlock);
1050 }
1051 else
1052 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1053
1054 pDevExt->uInitState = 0;
1055 return rc;
1056}
1057
1058
1059/**
1060 * Counter to VGDrvCommonInitDevExtFundament.
1061 *
1062 * @param pDevExt The device extension.
1063 */
1064void VGDrvCommonDeleteDevExtFundament(PVBOXGUESTDEVEXT pDevExt)
1065{
1066 int rc2;
1067 AssertMsgReturnVoid(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT, ("uInitState=%#x\n", pDevExt->uInitState));
1068 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_DELETED;
1069
1070 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1071 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1072 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1073}
1074
1075
1076/**
1077 * Initializes the VBoxGuest device extension resource parts.
1078 *
1079 * The native code locates the VMMDev on the PCI bus and retrieves the MMIO and
1080 * I/O port ranges; this function will take care of mapping the MMIO memory (if
1081 * present). Upon successful return the native code should set up the interrupt
1082 * handler.
1083 *
1084 * @returns VBox status code.
1085 *
1086 * @param pDevExt The device extension. Allocated by the native code.
1087 * @param IOPortBase The base of the I/O port range.
1088 * @param pvMMIOBase The base of the MMIO memory mapping.
1089 * This is optional, pass NULL if not present.
1090 * @param cbMMIO The size of the MMIO memory mapping.
1091 * This is optional, pass 0 if not present.
1092 * @param enmOSType The guest OS type to report to the VMMDev.
1093 * @param fFixedEvents Events that will be enabled upon init and no client
1094 * will ever be allowed to mask.
1095 */
1096int VGDrvCommonInitDevExtResources(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
1097 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
1098{
1099 int rc;
1100 AssertMsgReturn(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT, ("uInitState=%#x\n", pDevExt->uInitState),
1101 VERR_INVALID_STATE);
1102
1103 /*
1104 * If there is an MMIO region validate the version and size.
1105 */
1106 if (pvMMIOBase)
1107 {
1108 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1109 Assert(cbMMIO);
1110 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1111 && pVMMDev->u32Size >= 32
1112 && pVMMDev->u32Size <= cbMMIO)
1113 {
1114 pDevExt->pVMMDevMemory = pVMMDev;
1115 Log(("VGDrvCommonInitDevExtResources: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1116 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1117 }
1118 else /* try live without it. */
1119 LogRel(("VGDrvCommonInitDevExtResources: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1120 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1121 }
1122
1123 /*
1124 * Initialize the guest library and report the guest info back to VMMDev,
1125 * set the interrupt control filter mask, and fixate the guest mappings
1126 * made by the VMM.
1127 */
1128 pDevExt->IOPortBase = IOPortBase;
1129 rc = VbglR0InitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
1130 if (RT_SUCCESS(rc))
1131 {
1132 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1133 if (RT_SUCCESS(rc))
1134 {
1135 pDevExt->PhysIrqAckEvents = VbglR0PhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
1136 Assert(pDevExt->PhysIrqAckEvents != 0);
1137
1138 rc = vgdrvReportGuestInfo(enmOSType);
1139 if (RT_SUCCESS(rc))
1140 {
1141 /*
1142 * Set the fixed event and make sure the host doesn't have any lingering
1143 * guest capabilities or mouse status bits set.
1144 */
1145#ifdef VBOX_WITH_HGCM
1146 fFixedEvents |= VMMDEV_EVENT_HGCM;
1147#endif
1148 pDevExt->fFixedEvents = fFixedEvents;
1149 rc = vgdrvResetEventFilterOnHost(pDevExt, fFixedEvents);
1150 if (RT_SUCCESS(rc))
1151 {
1152 rc = vgdrvResetCapabilitiesOnHost(pDevExt);
1153 if (RT_SUCCESS(rc))
1154 {
1155 rc = vgdrvResetMouseStatusOnHost(pDevExt);
1156 if (RT_SUCCESS(rc))
1157 {
1158 /*
1159 * Initialize stuff which may fail without requiring the driver init to fail.
1160 */
1161 vgdrvInitFixateGuestMappings(pDevExt);
1162 vgdrvHeartbeatInit(pDevExt);
1163
1164 /*
1165 * Done!
1166 */
1167 rc = vgdrvReportDriverStatus(true /* Driver is active */);
1168 if (RT_FAILURE(rc))
1169 LogRel(("VGDrvCommonInitDevExtResources: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1170
1171 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_RESOURCES;
1172 LogFlowFunc(("VGDrvCommonInitDevExtResources: returns success\n"));
1173 return VINF_SUCCESS;
1174 }
1175 LogRel(("VGDrvCommonInitDevExtResources: failed to clear mouse status: rc=%Rrc\n", rc));
1176 }
1177 else
1178 LogRel(("VGDrvCommonInitDevExtResources: failed to clear guest capabilities: rc=%Rrc\n", rc));
1179 }
1180 else
1181 LogRel(("VGDrvCommonInitDevExtResources: failed to set fixed event filter: rc=%Rrc\n", rc));
1182 pDevExt->fFixedEvents = 0;
1183 }
1184 else
1185 LogRel(("VGDrvCommonInitDevExtResources: vgdrvReportGuestInfo failed: rc=%Rrc\n", rc));
1186 VbglR0GRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1187 }
1188 else
1189 LogRel(("VGDrvCommonInitDevExtResources: VbglR0GRAlloc failed: rc=%Rrc\n", rc));
1190
1191 VbglR0TerminatePrimary();
1192 }
1193 else
1194 LogRel(("VGDrvCommonInitDevExtResources: VbglR0InitPrimary failed: rc=%Rrc\n", rc));
1195 pDevExt->IOPortBase = UINT16_MAX;
1196 return rc;
1197}
1198
1199
1200/**
1201 * Deletes all the items in a wait chain.
1202 * @param pList The head of the chain.
1203 */
1204static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1205{
1206 while (!RTListIsEmpty(pList))
1207 {
1208 int rc2;
1209 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1210 RTListNodeRemove(&pWait->ListNode);
1211
1212 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1213 pWait->Event = NIL_RTSEMEVENTMULTI;
1214 pWait->pSession = NULL;
1215 RTMemFree(pWait);
1216 }
1217}
1218
1219
1220/**
1221 * Counter to VGDrvCommonInitDevExtResources.
1222 *
1223 * @param pDevExt The device extension.
1224 */
1225void VGDrvCommonDeleteDevExtResources(PVBOXGUESTDEVEXT pDevExt)
1226{
1227 Log(("VGDrvCommonDeleteDevExtResources:\n"));
1228 AssertMsgReturnVoid(pDevExt->uInitState == VBOXGUESTDEVEXT_INIT_STATE_RESOURCES, ("uInitState=%#x\n", pDevExt->uInitState));
1229 pDevExt->uInitState = VBOXGUESTDEVEXT_INIT_STATE_FUNDAMENT;
1230
1231 /*
1232 * Stop and destroy HB timer and disable host heartbeat checking.
1233 */
1234 if (pDevExt->pHeartbeatTimer)
1235 {
1236 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1237 vgdrvHeartbeatHostConfigure(pDevExt, false);
1238 }
1239
1240 VbglR0GRFree(pDevExt->pReqGuestHeartbeat);
1241 pDevExt->pReqGuestHeartbeat = NULL;
1242
1243 /*
1244 * Clean up the bits that involves the host first.
1245 */
1246 vgdrvTermUnfixGuestMappings(pDevExt);
1247 if (!RTListIsEmpty(&pDevExt->SessionList))
1248 {
1249 LogRelFunc(("session list not empty!\n"));
1250 RTListInit(&pDevExt->SessionList);
1251 }
1252
1253 /*
1254 * Update the host flags (mouse status etc) not to reflect this session.
1255 */
1256 pDevExt->fFixedEvents = 0;
1257 vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1258 vgdrvResetCapabilitiesOnHost(pDevExt);
1259 vgdrvResetMouseStatusOnHost(pDevExt);
1260
1261 vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1262
1263 /*
1264 * Cleanup all the other resources.
1265 */
1266 vgdrvDeleteWaitList(&pDevExt->WaitList);
1267#ifdef VBOX_WITH_HGCM
1268 vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
1269#endif
1270#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1271 vgdrvDeleteWaitList(&pDevExt->WakeUpList);
1272#endif
1273 vgdrvDeleteWaitList(&pDevExt->WokenUpList);
1274 vgdrvDeleteWaitList(&pDevExt->FreeList);
1275
1276 VbglR0TerminatePrimary();
1277
1278
1279 pDevExt->pVMMDevMemory = NULL;
1280 pDevExt->IOPortBase = 0;
1281 pDevExt->pIrqAckEvents = NULL; /* Freed by VbglR0TerminatePrimary. */
1282}
1283
1284
1285/**
1286 * Initializes the VBoxGuest device extension when the device driver is loaded.
1287 *
1288 * The native code locates the VMMDev on the PCI bus and retrieves the MMIO and
1289 * I/O port ranges; this function will take care of mapping the MMIO memory (if
1290 * present). Upon successful return the native code should set up the interrupt
1291 * handler.
1292 *
1293 * Instead of calling this method, the host specific code can choose to perform a
1294 * more granular initialization using:
1295 * 1. VGDrvCommonInitLoggers
1296 * 2. VGDrvCommonInitDevExtFundament
1297 * 3. VGDrvCommonInitDevExtResources
1298 *
1299 * @returns VBox status code.
1300 *
1301 * @param pDevExt The device extension. Allocated by the native code.
1302 * @param IOPortBase The base of the I/O port range.
1303 * @param pvMMIOBase The base of the MMIO memory mapping.
1304 * This is optional, pass NULL if not present.
1305 * @param cbMMIO The size of the MMIO memory mapping.
1306 * This is optional, pass 0 if not present.
1307 * @param enmOSType The guest OS type to report to the VMMDev.
1308 * @param fFixedEvents Events that will be enabled upon init and no client
1309 * will ever be allowed to mask.
1310 */
1311int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
1312 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
1313{
1314 int rc;
1315 VGDrvCommonInitLoggers();
1316
1317 rc = VGDrvCommonInitDevExtFundament(pDevExt);
1318 if (RT_SUCCESS(rc))
1319 {
1320 rc = VGDrvCommonInitDevExtResources(pDevExt, IOPortBase, pvMMIOBase, cbMMIO, enmOSType, fFixedEvents);
1321 if (RT_SUCCESS(rc))
1322 return rc;
1323
1324 VGDrvCommonDeleteDevExtFundament(pDevExt);
1325 }
1326 VGDrvCommonDestroyLoggers();
1327 return rc; /* (failed) */
1328}
1329
1330
1331/**
1332 * Checks if the given option can be taken to not mean 'false'.
1333 *
1334 * @returns true or false accordingly.
1335 * @param pszValue The value to consider.
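 *
 * @note  Going by the checks below, values beginning with 'n', 'f' or 'd'
 *        (e.g. no, false, disabled), values beginning with "of"/"off", and a
 *        plain "0" are taken as false in any letter case; anything else
 *        non-empty counts as true.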
1336 */
1337bool VBDrvCommonIsOptionValueTrue(const char *pszValue)
1338{
1339 if (pszValue)
1340 {
1341 char ch;
1342 while ( (ch = *pszValue) != '\0'
1343 && RT_C_IS_SPACE(ch))
1344 pszValue++;
1345
1346 return ch != '\0'
1347 && ch != 'n' /* no */
1348 && ch != 'N' /* NO */
1349 && ch != 'd' /* disabled */
1350 && ch != 'f' /* false*/
1351 && ch != 'F' /* FALSE */
1352 && ch != 'D' /* DISABLED */
1353 && ( (ch != 'o' && ch != 'O') /* off, OFF, Off */
1354 || (pszValue[1] != 'f' && pszValue[1] != 'F') )
1355 && (ch != '0' || pszValue[1] != '\0') /* '0' */
1356 ;
1357 }
1358 return false;
1359}
1360
1361
1362/**
1363 * Processes an option.
1364 *
1365 * This will let the OS specific code have a go at it too.
1366 *
1367 * @param pDevExt The device extension.
1368 * @param pszName The option name, sans prefix.
1369 * @param pszValue The option value.
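 *
 * @note  Options handled here are "r3_log_to_host" (and the legacy alias
 *        "LoggingEnabled") plus the "log"/"dbg_log" family with optional
 *        "_flags" and "_dest" suffixes; anything else is handed to
 *        VGDrvNativeProcessOption().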
1370 */
1371void VGDrvCommonProcessOption(PVBOXGUESTDEVEXT pDevExt, const char *pszName, const char *pszValue)
1372{
1373 Log(("VGDrvCommonProcessOption: pszName='%s' pszValue='%s'\n", pszName, pszValue));
1374
1375 if ( RTStrICmpAscii(pszName, "r3_log_to_host") == 0
1376 || RTStrICmpAscii(pszName, "LoggingEnabled") == 0 /*legacy*/ )
1377 pDevExt->fLoggingEnabled = VBDrvCommonIsOptionValueTrue(pszValue);
1378 else if ( RTStrNICmpAscii(pszName, RT_STR_TUPLE("log")) == 0
1379 || RTStrNICmpAscii(pszName, RT_STR_TUPLE("dbg_log")) == 0)
1380 {
1381 bool const fLogRel = *pszName == 'd' || *pszName == 'D';
1382 const char *pszSubName = &pszName[fLogRel ? 4 + 3 : 3];
1383 if ( !*pszSubName
1384 || RTStrICmpAscii(pszSubName, "_flags") == 0
1385 || RTStrICmpAscii(pszSubName, "_dest") == 0)
1386 {
1387 PRTLOGGER pLogger = fLogRel ? RTLogRelGetDefaultInstance() : RTLogDefaultInstance();
1388 if (pLogger)
1389 {
1390 if (!*pszSubName)
1391 RTLogGroupSettings(pLogger, pszValue);
1392 else if (RTStrICmpAscii(pszSubName, "_flags") == 0)
1393 RTLogFlags(pLogger, pszValue);
1394 else
1395 RTLogDestinations(pLogger, pszValue);
1396 }
1397 }
1398 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1399 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1400 }
1401 else if (!VGDrvNativeProcessOption(pDevExt, pszName, pszValue))
1402 LogRel(("VBoxGuest: Ignoring unknown option '%s' (value '%s')\n", pszName, pszValue));
1403}
1404
1405
1406/**
1407 * Read driver configuration from the host.
1408 *
1409 * This involves connecting to the guest properties service, which means that
1410 * interrupts needs to work and that the calling thread must be able to block.
1411 *
1412 * @param pDevExt The device extension.
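 *
 * @note  The properties are read from "/VirtualBox/GuestAdd/VBoxGuest/" and
 *        must have the RDONLYGUEST flag set to be accepted.  On the host side
 *        something along these lines should work (illustrative example):
 * @code
 *      VBoxManage guestproperty set "MyVM" \
 *          /VirtualBox/GuestAdd/VBoxGuest/r3_log_to_host true --flags RDONLYGUEST
 * @endcode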
1413 */
1414void VGDrvCommonProcessOptionsFromHost(PVBOXGUESTDEVEXT pDevExt)
1415{
1416 /*
1417 * Create a kernel session without ourselves, then connect to the HGCM service.
1418 */
1419 PVBOXGUESTSESSION pSession;
1420 int rc = VGDrvCommonCreateKernelSession(pDevExt, &pSession);
1421 if (RT_SUCCESS(rc))
1422 {
1423 union
1424 {
1425 VBGLIOCHGCMCONNECT Connect;
1426 VBGLIOCHGCMDISCONNECT Disconnect;
1427 GuestPropMsgEnumProperties EnumMsg;
1428 } uBuf;
1429
1430 RT_ZERO(uBuf.Connect);
1431 VBGLREQHDR_INIT(&uBuf.Connect.Hdr, HGCM_CONNECT);
1432 uBuf.Connect.u.In.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1433 RTStrCopy(uBuf.Connect.u.In.Loc.u.host.achName, sizeof(uBuf.Connect.u.In.Loc.u.host.achName),
1434 "VBoxGuestPropSvc"); /** @todo Add a define to the header for the name. */
1435 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CONNECT, pDevExt, pSession, &uBuf.Connect.Hdr, sizeof(uBuf.Connect));
1436 if (RT_SUCCESS(rc))
1437 {
1438 static const char g_szzPattern[] = "/VirtualBox/GuestAdd/VBoxGuest/*\0";
1439 uint32_t const idClient = uBuf.Connect.u.Out.idClient;
1440 char *pszzStrings = NULL;
1441 uint32_t cbStrings;
1442
1443 /*
1444 * Enumerate all the relevant properties. We try with a 1KB buffer, but
1445 * will double it until we get what we want or go beyond 16KB.
1446 */
1447 for (cbStrings = _1K; cbStrings <= _16K; cbStrings *= 2)
1448 {
1449 pszzStrings = (char *)RTMemAllocZ(cbStrings);
1450 if (pszzStrings)
1451 {
1452 VBGL_HGCM_HDR_INIT(&uBuf.EnumMsg.hdr, idClient, GUEST_PROP_FN_ENUM_PROPS, 3);
1453
1454 uBuf.EnumMsg.patterns.type = VMMDevHGCMParmType_LinAddr;
1455 uBuf.EnumMsg.patterns.u.Pointer.size = sizeof(g_szzPattern);
1456 uBuf.EnumMsg.patterns.u.Pointer.u.linearAddr = (uintptr_t)g_szzPattern;
1457
1458 uBuf.EnumMsg.strings.type = VMMDevHGCMParmType_LinAddr;
1459 uBuf.EnumMsg.strings.u.Pointer.size = cbStrings;
1460 uBuf.EnumMsg.strings.u.Pointer.u.linearAddr = (uintptr_t)pszzStrings;
1461
1462 uBuf.EnumMsg.size.type = VMMDevHGCMParmType_32bit;
1463 uBuf.EnumMsg.size.u.value32 = 0;
1464
1465 rc = VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_CALL(sizeof(uBuf.EnumMsg)), pDevExt, pSession,
1466 &uBuf.EnumMsg.hdr.Hdr, sizeof(uBuf.EnumMsg));
1467 if (RT_SUCCESS(rc))
1468 {
1469 if ( uBuf.EnumMsg.size.type == VMMDevHGCMParmType_32bit
1470 && uBuf.EnumMsg.size.u.value32 <= cbStrings
1471 && uBuf.EnumMsg.size.u.value32 > 0)
1472 cbStrings = uBuf.EnumMsg.size.u.value32;
1473 Log(("VGDrvCommonReadConfigurationFromHost: GUEST_PROP_FN_ENUM_PROPS -> %#x bytes (cbStrings=%#x)\n",
1474 uBuf.EnumMsg.size.u.value32, cbStrings));
1475 break;
1476 }
1477
1478 RTMemFree(pszzStrings);
1479 pszzStrings = NULL;
1480 }
1481 else
1482 {
1483 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to allocate %#x bytes\n", cbStrings));
1484 break;
1485 }
1486 }
1487
1488 /*
1489 * Disconnect and destroy the session.
1490 */
1491 VBGLREQHDR_INIT(&uBuf.Disconnect.Hdr, HGCM_DISCONNECT);
1492 uBuf.Disconnect.u.In.idClient = idClient;
1493 VGDrvCommonIoCtl(VBGL_IOCTL_HGCM_DISCONNECT, pDevExt, pSession, &uBuf.Disconnect.Hdr, sizeof(uBuf.Disconnect));
1494
1495 VGDrvCommonCloseSession(pDevExt, pSession);
1496
1497 /*
1498 * Process the properties if we got any.
1499 *
1500 * The string buffer contains packed strings in groups of four - name, value,
1501 * timestamp (as a decimal string) and flags. It is terminated by four empty
1502 * strings. Layout:
1503 * Name\0Value\0Timestamp\0Flags\0
1504 */
1505 if (pszzStrings)
1506 {
1507 uint32_t off;
1508 for (off = 0; off < cbStrings; off++)
1509 {
1510 /*
1511 * Parse the four fields, checking that it's all plain ASCII w/o any control characters.
1512 */
1513 const char *apszFields[4] = { NULL, NULL, NULL, NULL };
1514 bool fValidFields = true;
1515 unsigned iField;
1516 for (iField = 0; iField < RT_ELEMENTS(apszFields); iField++)
1517 {
1518 apszFields[iField] = &pszzStrings[off];
1519 while (off < cbStrings)
1520 {
1521 char ch = pszzStrings[off++];
1522 if ((unsigned)ch < 0x20U || (unsigned)ch > 0x7fU)
1523 {
1524 if (!ch)
1525 break;
1526 if (fValidFields)
1527 Log(("VGDrvCommonReadConfigurationFromHost: Invalid char %#x at %#x (field %u)\n",
1528 ch, off - 1, iField));
1529 fValidFields = false;
1530 }
1531 }
1532 }
1533 if ( off <= cbStrings
1534 && fValidFields
1535 && *apszFields[0] != '\0')
1536 {
1537 /*
1538 * Validate and convert the flags to integer, then process the option.
1539 */
1540 uint32_t fFlags = 0;
1541 rc = GuestPropValidateFlags(apszFields[3], &fFlags);
1542 if (RT_SUCCESS(rc))
1543 {
1544 if (fFlags & GUEST_PROP_F_RDONLYGUEST)
1545 {
1546 apszFields[0] += sizeof(g_szzPattern) - 2;
1547 VGDrvCommonProcessOption(pDevExt, apszFields[0], apszFields[1]);
1548 }
1549 else
1550 LogRel(("VBoxGuest: Ignoring '%s' as it does not have RDONLYGUEST set\n", apszFields[0]));
1551 }
1552 else
1553 LogRel(("VBoxGuest: Invalid flags '%s' for '%s': %Rrc\n", apszFields[3], apszFields[0], rc));
1554 }
1555 else if (off < cbStrings)
1556 {
1557 LogRel(("VBoxGuest: Malformed guest properties enum result!\n"));
1558 Log(("VBoxGuest: off=%#x cbStrings=%#x\n%.*Rhxd\n", off, cbStrings, cbStrings, pszzStrings));
1559 break;
1560 }
1561 else if (!fValidFields)
1562 LogRel(("VBoxGuest: Ignoring %.*Rhxs as it has invalid characters in one or more fields\n",
1563 (int)strlen(apszFields[0]), apszFields[0]));
1564 else
1565 break;
1566 }
1567
1568 RTMemFree(pszzStrings);
1569 }
1570 else
1571 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to enumerate '%s': %Rrc\n", g_szzPattern, rc));
1572
1573 }
1574 else
1575 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1576 }
1577 else
1578 LogRel(("VGDrvCommonReadConfigurationFromHost: failed to connect: %Rrc\n", rc));
1579}
1580
1581
1582/**
1583 * Destroys the VBoxGuest device extension.
1584 *
1585 * The native code should call this before the driver is unloaded,
1586 * but don't call this on shutdown.
1587 *
1588 * @param pDevExt The device extension.
1589 */
1590void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1591{
1592 Log(("VGDrvCommonDeleteDevExt:\n"));
1593 Log(("VBoxGuest: The additions driver is terminating.\n"));
1594 VGDrvCommonDeleteDevExtResources(pDevExt);
1595 VGDrvCommonDeleteDevExtFundament(pDevExt);
1596 VGDrvCommonDestroyLoggers();
1597}
1598
1599
1600/**
1601 * Creates a VBoxGuest user session.
1602 *
1603 * The native code calls this when a ring-3 client opens the device.
1604 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1605 *
1606 * @returns VBox status code.
1607 * @param pDevExt The device extension.
1608 * @param fRequestor VMMDEV_REQUESTOR_XXX.
1609 * @param ppSession Where to store the session on success.
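 *
 * @note  The VMMDEV_REQUESTOR_USER_DEVICE bit in @a fRequestor marks sessions
 *        that came in through the restricted 'vboxuser' style device node; it
 *        is what sets pSession->fUserSession below.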
1610 */
1611int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, uint32_t fRequestor, PVBOXGUESTSESSION *ppSession)
1612{
1613 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1614 if (RT_UNLIKELY(!pSession))
1615 {
1616 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1617 return VERR_NO_MEMORY;
1618 }
1619
1620 pSession->Process = RTProcSelf();
1621 pSession->R0Process = RTR0ProcHandleSelf();
1622 pSession->pDevExt = pDevExt;
1623 pSession->fRequestor = fRequestor;
1624 pSession->fUserSession = RT_BOOL(fRequestor & VMMDEV_REQUESTOR_USER_DEVICE);
1625 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1626 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1627 pDevExt->cSessions++;
1628 RTSpinlockRelease(pDevExt->SessionSpinlock);
1629
1630 *ppSession = pSession;
1631 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1632 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1633 return VINF_SUCCESS;
1634}
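
/*
 * Illustrative sketch (not part of the original file): how a platform
 * specific open handler might use the function above.  Everything except
 * VMMDEV_REQUESTOR_USER_DEVICE and the functions named here is an
 * assumption; real native code derives the requestor bits from the calling
 * process and from which device node was opened.
 *
 *      PVBOXGUESTSESSION pSession;
 *      uint32_t fRequestor = VMMDEV_REQUESTOR_USER_DEVICE; // restricted node
 *      int rc = VGDrvCommonCreateUserSession(pDevExt, fRequestor, &pSession);
 *      if (RT_SUCCESS(rc))
 *      {
 *          // Stash pSession in the native file handle; it is later passed to
 *          // VGDrvCommonIoCtl and finally to VGDrvCommonCloseSession.
 *      }
 */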
1635
1636
1637/**
1638 * Creates a VBoxGuest kernel session.
1639 *
1640 * The native code calls this when a ring-0 client connects to the device.
1641 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1642 *
1643 * @returns VBox status code.
1644 * @param pDevExt The device extension.
1645 * @param ppSession Where to store the session on success.
1646 */
1647int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1648{
1649 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1650 if (RT_UNLIKELY(!pSession))
1651 {
1652 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1653 return VERR_NO_MEMORY;
1654 }
1655
1656 pSession->Process = NIL_RTPROCESS;
1657 pSession->R0Process = NIL_RTR0PROCESS;
1658 pSession->pDevExt = pDevExt;
1659 pSession->fRequestor = VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV_OTHER
1660 | VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
1661 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1662 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1663 pDevExt->cSessions++;
1664 RTSpinlockRelease(pDevExt->SessionSpinlock);
1665
1666 *ppSession = pSession;
1667 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1668 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1669 return VINF_SUCCESS;
1670}
1671
1672
1673/**
1674 * Closes a VBoxGuest session.
1675 *
1676 * @param pDevExt The device extension.
1677 * @param pSession The session to close (and free).
1678 */
1679void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1680{
1681#ifdef VBOX_WITH_HGCM
1682 unsigned i;
1683#endif
1684 LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1685 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1686
1687 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1688 RTListNodeRemove(&pSession->ListNode);
1689 pDevExt->cSessions--;
1690 RTSpinlockRelease(pDevExt->SessionSpinlock);
1691 vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBGL_IOC_AGC_FLAGS_DEFAULT, true /*fSessionTermination*/);
1692 vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/,
1693 NULL /*pfSessionCaps*/, NULL /*pfGlobalCaps*/, true /*fSessionTermination*/);
1694 vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1695 vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1696
1697 vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1698
1699#ifdef VBOX_WITH_HGCM
1700 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1701 if (pSession->aHGCMClientIds[i])
1702 {
1703 uint32_t idClient = pSession->aHGCMClientIds[i];
1704 pSession->aHGCMClientIds[i] = 0;
1705 Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", idClient));
1706 VbglR0HGCMInternalDisconnect(idClient, VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV,
1707 vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1708 }
1709#endif
1710
1711 pSession->pDevExt = NULL;
1712 pSession->Process = NIL_RTPROCESS;
1713 pSession->R0Process = NIL_RTR0PROCESS;
1714 vgdrvCloseMemBalloon(pDevExt, pSession);
1715 RTMemFree(pSession);
1716}
1717
1718
1719/**
1720 * Allocates a wait-for-event entry.
1721 *
1722 * @returns The wait-for-event entry, or NULL on allocation failure.
1723 * @param pDevExt The device extension.
1724 * @param pSession The session that's allocating this. Can be NULL.
1725 */
1726static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1727{
1728 /*
1729 * Allocate it one way or the other.
1730 */
1731 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1732 if (pWait)
1733 {
1734 RTSpinlockAcquire(pDevExt->EventSpinlock);
1735
1736 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1737 if (pWait)
1738 RTListNodeRemove(&pWait->ListNode);
1739
1740 RTSpinlockRelease(pDevExt->EventSpinlock);
1741 }
1742 if (!pWait)
1743 {
1744 int rc;
1745
1746 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1747 if (!pWait)
1748 {
1749 LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
1750 return NULL;
1751 }
1752
1753 rc = RTSemEventMultiCreate(&pWait->Event);
1754 if (RT_FAILURE(rc))
1755 {
1756 LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1757 RTMemFree(pWait);
1758 return NULL;
1759 }
1760
1761 pWait->ListNode.pNext = NULL;
1762 pWait->ListNode.pPrev = NULL;
1763 }
1764
1765 /*
1766 * Zero members just as a precaution.
1767 */
1768 pWait->fReqEvents = 0;
1769 pWait->fResEvents = 0;
1770#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1771 pWait->fPendingWakeUp = false;
1772 pWait->fFreeMe = false;
1773#endif
1774 pWait->pSession = pSession;
1775#ifdef VBOX_WITH_HGCM
1776 pWait->pHGCMReq = NULL;
1777#endif
1778 RTSemEventMultiReset(pWait->Event);
1779 return pWait;
1780}
1781
1782
1783/**
1784 * Frees the wait-for-event entry.
1785 *
1786 * The caller must own the wait spinlock!
1787 * The entry must be in a list!
1788 *
1789 * @param pDevExt The device extension.
1790 * @param pWait The wait-for-event entry to free.
1791 */
1792static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1793{
1794 pWait->fReqEvents = 0;
1795 pWait->fResEvents = 0;
1796#ifdef VBOX_WITH_HGCM
1797 pWait->pHGCMReq = NULL;
1798#endif
1799#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1800 Assert(!pWait->fFreeMe);
1801 if (pWait->fPendingWakeUp)
1802 pWait->fFreeMe = true;
1803 else
1804#endif
1805 {
1806 RTListNodeRemove(&pWait->ListNode);
1807 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1808 }
1809}
1810
1811
1812/**
1813 * Frees the wait-for-event entry.
1814 *
1815 * @param pDevExt The device extension.
1816 * @param pWait The wait-for-event entry to free.
1817 */
1818static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1819{
1820 RTSpinlockAcquire(pDevExt->EventSpinlock);
1821 vgdrvWaitFreeLocked(pDevExt, pWait);
1822 RTSpinlockRelease(pDevExt->EventSpinlock);
1823}
1824
1825
1826#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1827/**
1828 * Processes the wake-up list.
1829 *
1830 * All entries in the wake-up list get signalled and moved to the woken-up
1831 * list.
1832 * At least on Windows this function can be invoked concurrently from
1833 * different VCPUs. So, be thread-safe.
1834 *
1835 * @param pDevExt The device extension.
1836 */
1837void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1838{
1839 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1840 {
1841 RTSpinlockAcquire(pDevExt->EventSpinlock);
1842 for (;;)
1843 {
1844 int rc;
1845 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1846 if (!pWait)
1847 break;
1848 /* Prevent other threads from accessing pWait when spinlock is released. */
1849 RTListNodeRemove(&pWait->ListNode);
1850
1851 pWait->fPendingWakeUp = true;
1852 RTSpinlockRelease(pDevExt->EventSpinlock);
1853
1854 rc = RTSemEventMultiSignal(pWait->Event);
1855 AssertRC(rc);
1856
1857 RTSpinlockAcquire(pDevExt->EventSpinlock);
1858 Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
1859 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1860 pWait->fPendingWakeUp = false;
1861 if (RT_LIKELY(!pWait->fFreeMe))
1862 { /* likely */ }
1863 else
1864 {
1865 pWait->fFreeMe = false;
1866 vgdrvWaitFreeLocked(pDevExt, pWait);
1867 }
1868 }
1869 RTSpinlockRelease(pDevExt->EventSpinlock);
1870 }
1871}
1872#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1873
1874
1875/**
1876 * Implements the fast (no input or output) type of IOCtls.
1877 *
1878 * This is currently just a placeholder stub inherited from the support driver code.
1879 *
1880 * @returns VBox status code.
1881 * @param iFunction The IOCtl function number.
1882 * @param pDevExt The device extension.
1883 * @param pSession The session.
1884 */
1885int VGDrvCommonIoCtlFast(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1886{
1887 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1888
1889 NOREF(iFunction);
1890 NOREF(pDevExt);
1891 NOREF(pSession);
1892 return VERR_NOT_SUPPORTED;
1893}
1894
1895
1896/**
1897 * Gets the driver I/O control interface version, maybe adjusting it for
1898 * backwards compatibility.
1899 *
1900 * The adjusting is currently not implemented as we only have one major I/O
1901 * control interface version out there to support. This is something we will
1902 * implement as needed.
1903 *
1904 * @returns IPRT status code.
1905 * @param pDevExt The device extension.
1906 * @param pSession The session.
1907 * @param pReq The request info.
1908 */
1909static int vgdrvIoCtl_DriverVersionInfo(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCDRIVERVERSIONINFO pReq)
1910{
1911 int rc;
1912 LogFlow(("VBGL_IOCTL_DRIVER_VERSION_INFO: uReqVersion=%#x uMinVersion=%#x uReserved1=%#x uReserved2=%#x\n",
1913 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved1, pReq->u.In.uReserved2));
1914 RT_NOREF2(pDevExt, pSession);
1915
1916 /*
1917 * Input validation.
1918 */
1919 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1920 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1921 {
1922 /*
1923 * Match the version.
1924 * The current logic is very simple, match the major interface version.
1925 */
1926 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1927 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1928 rc = VINF_SUCCESS;
1929 else
1930 {
1931 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1932 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1933 rc = VERR_VERSION_MISMATCH;
1934 }
1935 }
1936 else
1937 {
1938 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1939 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1940 rc = VERR_INVALID_PARAMETER;
1941 }
1942
1943 pReq->u.Out.uSessionVersion = RT_SUCCESS(rc) ? VBGL_IOC_VERSION : UINT32_MAX;
1944 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1945 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1946 pReq->u.Out.uReserved1 = 0;
1947 pReq->u.Out.uReserved2 = 0;
1948 return rc;
1949}
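
/*
 * Illustrative sketch (not part of the original file): the version
 * negotiation seen from a ring-3 client.  Only the u.In/u.Out fields used by
 * the function above are taken from real definitions; header initialization
 * and the ioctl plumbing are platform specific and omitted here.
 *
 *      VBGLIOCDRIVERVERSIONINFO VerInfo;
 *      // ... initialize VerInfo.Hdr for VBGL_IOCTL_DRIVER_VERSION_INFO ...
 *      VerInfo.u.In.uReqVersion = VBGL_IOC_VERSION;   // version we were built against
 *      VerInfo.u.In.uMinVersion = VBGL_IOC_VERSION & UINT32_C(0xffff0000); // same major, any minor
 *      VerInfo.u.In.uReserved1  = 0;
 *      VerInfo.u.In.uReserved2  = 0;
 *      // ... issue the ioctl; on success u.Out.uSessionVersion and
 *      //     u.Out.uDriverVersion report what the driver actually speaks. ...
 */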
1950
1951
1952/**
1953 * Similar to vgdrvIoCtl_DriverVersionInfo, except it's for IDC.
1954 *
1955 * @returns IPRT status code.
1956 * @param pDevExt The device extension.
1957 * @param pSession The session.
1958 * @param pReq The request info.
1959 */
1960static int vgdrvIoCtl_IdcConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCCONNECT pReq)
1961{
1962 int rc;
1963 LogFlow(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x uReqVersion=%#x uMinVersion=%#x uReserved=%#x\n",
1964 pReq->u.In.u32MagicCookie, pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved));
1965 Assert(pSession != NULL);
1966 RT_NOREF(pDevExt);
1967
1968 /*
1969 * Input validation.
1970 */
1971 if (pReq->u.In.u32MagicCookie == VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE)
1972 {
1973 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1974 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1975 {
1976 /*
1977 * Match the version.
1978 * The current logic is very simple, match the major interface version.
1979 */
1980 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1981 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1982 {
1983 pReq->u.Out.pvSession = pSession;
1984 pReq->u.Out.uSessionVersion = VBGL_IOC_VERSION;
1985 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1986 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1987 pReq->u.Out.uReserved1 = 0;
1988 pReq->u.Out.pvReserved2 = NULL;
1989 return VINF_SUCCESS;
1990
1991 }
1992 LogRel(("VBGL_IOCTL_IDC_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1993 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1994 rc = VERR_VERSION_MISMATCH;
1995 }
1996 else
1997 {
1998 LogRel(("VBGL_IOCTL_IDC_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1999 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
2000 rc = VERR_INVALID_PARAMETER;
2001 }
2002
2003 pReq->u.Out.pvSession = NULL;
2004 pReq->u.Out.uSessionVersion = UINT32_MAX;
2005 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
2006 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
2007 pReq->u.Out.uReserved1 = 0;
2008 pReq->u.Out.pvReserved2 = NULL;
2009 }
2010 else
2011 {
2012 LogRel(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x expected %#x!\n",
2013 pReq->u.In.u32MagicCookie, VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE));
2014 rc = VERR_INVALID_PARAMETER;
2015 }
2016 return rc;
2017}
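
/*
 * Illustrative sketch (not part of the original file): how a third party
 * ring-0 driver might fill in the IDC connect request handled above.  The
 * field and constant names are the ones used by this function; how the
 * request is delivered to the driver is OS specific and omitted.
 *
 *      VBGLIOCIDCCONNECT Req;
 *      // ... initialize Req.Hdr for VBGL_IOCTL_IDC_CONNECT ...
 *      Req.u.In.u32MagicCookie = VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE;
 *      Req.u.In.uReqVersion    = VBGL_IOC_VERSION;
 *      Req.u.In.uMinVersion    = VBGL_IOC_VERSION & UINT32_C(0xffff0000);
 *      Req.u.In.uReserved      = 0;
 *      // On success Req.u.Out.pvSession is the opaque session cookie that
 *      // must be passed back in VBGL_IOCTL_IDC_DISCONNECT when detaching.
 */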
2018
2019
2020/**
2021 * Counterpart to vgdrvIoCtl_IdcConnect, destroys the session.
2022 *
2023 * @returns IPRT status code.
2024 * @param pDevExt The device extension.
2025 * @param pSession The session.
2026 * @param pReq The request info.
2027 */
2028static int vgdrvIoCtl_IdcDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCDISCONNECT pReq)
2029{
2030 LogFlow(("VBGL_IOCTL_IDC_DISCONNECT: pvSession=%p vs pSession=%p\n", pReq->u.In.pvSession, pSession));
2031 RT_NOREF(pDevExt);
2032 Assert(pSession != NULL);
2033
2034 if (pReq->u.In.pvSession == pSession)
2035 {
2036 VGDrvCommonCloseSession(pDevExt, pSession);
2037 return VINF_SUCCESS;
2038 }
2039 LogRel(("VBGL_IOCTL_IDC_DISCONNECT: In.pvSession=%p is not equal to pSession=%p!\n", pReq->u.In.pvSession, pSession));
2040 return VERR_INVALID_PARAMETER;
2041}
2042
2043
2044/**
2045 * Return the VMM device I/O info.
2046 *
2047 * @returns IPRT status code.
2048 * @param pDevExt The device extension.
2049 * @param pInfo The request info.
2050 * @note Ring-0 only, caller checked.
2051 */
2052static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDEVIOINFO pInfo)
2053{
2054 LogFlow(("VBGL_IOCTL_GET_VMMDEV_IO_INFO\n"));
2055
2056 pInfo->u.Out.IoPort = pDevExt->IOPortBase;
2057 pInfo->u.Out.pvVmmDevMapping = pDevExt->pVMMDevMemory;
2058 pInfo->u.Out.auPadding[0] = 0;
2059#if HC_ARCH_BITS != 32
2060 pInfo->u.Out.auPadding[1] = 0;
2061 pInfo->u.Out.auPadding[2] = 0;
2062#endif
2063 return VINF_SUCCESS;
2064}
2065
2066
2067/**
2068 * Set the callback for the kernel mouse handler.
2069 *
2070 * @returns IPRT status code.
2071 * @param pDevExt The device extension.
2072 * @param pNotify The new callback information.
2073 */
2074int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
2075{
2076 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));
2077
2078#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
2079 VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
2080#else
2081 RTSpinlockAcquire(pDevExt->EventSpinlock);
2082 pDevExt->pfnMouseNotifyCallback = pNotify->u.In.pfnNotify;
2083 pDevExt->pvMouseNotifyCallbackArg = pNotify->u.In.pvUser;
2084 RTSpinlockRelease(pDevExt->EventSpinlock);
2085#endif
2086 return VINF_SUCCESS;
2087}
2088
2089
2090/**
2091 * Worker for vgdrvIoCtl_WaitForEvents.
2092 *
2093 * The caller enters the spinlock, we leave it.
2094 *
2095 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
2096 */
2097DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2098 PVBGLIOCWAITFOREVENTS pInfo, int iEvent, const uint32_t fReqEvents)
2099{
2100 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
2101 if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
2102 fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
2103 if (fMatches || pSession->fPendingCancelWaitEvents)
2104 {
2105 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
2106 RTSpinlockRelease(pDevExt->EventSpinlock);
2107
2108 pInfo->u.Out.fEvents = fMatches;
2109 if (fReqEvents & ~((uint32_t)1 << iEvent))
2110 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
2111 else
2112 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
2113 pSession->fPendingCancelWaitEvents = false;
2114 return VINF_SUCCESS;
2115 }
2116
2117 RTSpinlockRelease(pDevExt->EventSpinlock);
2118 return VERR_TIMEOUT;
2119}
2120
2121
2122static int vgdrvIoCtl_WaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2123 PVBGLIOCWAITFOREVENTS pInfo, bool fInterruptible)
2124{
2125 uint32_t const cMsTimeout = pInfo->u.In.cMsTimeOut;
2126 const uint32_t fReqEvents = pInfo->u.In.fEvents;
2127 uint32_t fResEvents;
2128 int iEvent;
2129 PVBOXGUESTWAIT pWait;
2130 int rc;
2131
2132 pInfo->u.Out.fEvents = 0; /* Note! This overwrites pInfo->u.In.* fields! */
2133
2134 /*
2135 * Copy and verify the input mask.
2136 */
2137 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
2138 if (RT_UNLIKELY(iEvent < 0))
2139 {
2140 LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
2141 return VERR_INVALID_PARAMETER;
2142 }
2143
2144 /*
2145 * Check the condition up front, before doing the wait-for-event allocations.
2146 */
2147 RTSpinlockAcquire(pDevExt->EventSpinlock);
2148 rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
2149 if (rc == VINF_SUCCESS)
2150 return rc;
2151
2152 if (!cMsTimeout)
2153 {
2154 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
2155 return VERR_TIMEOUT;
2156 }
2157
2158 pWait = vgdrvWaitAlloc(pDevExt, pSession);
2159 if (!pWait)
2160 return VERR_NO_MEMORY;
2161 pWait->fReqEvents = fReqEvents;
2162
2163 /*
2164 * We've got the wait entry now, re-enter the spinlock and check for the condition.
2165 * If the wait condition is met, return.
2166 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
2167 */
2168 RTSpinlockAcquire(pDevExt->EventSpinlock);
2169 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
2170 rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
2171 if (rc == VINF_SUCCESS)
2172 {
2173 vgdrvWaitFreeUnlocked(pDevExt, pWait);
2174 return rc;
2175 }
2176
2177 if (fInterruptible)
2178 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
2179 else
2180 rc = RTSemEventMultiWait(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
2181
2182 /*
2183 * There is one special case here and that's when the semaphore is
2184 * destroyed upon device driver unload. This shouldn't happen of course,
2185 * but in case it does, just get out of here ASAP.
2186 */
2187 if (rc == VERR_SEM_DESTROYED)
2188 return rc;
2189
2190 /*
2191 * Unlink the wait item and dispose of it.
2192 */
2193 RTSpinlockAcquire(pDevExt->EventSpinlock);
2194 fResEvents = pWait->fResEvents;
2195 vgdrvWaitFreeLocked(pDevExt, pWait);
2196 RTSpinlockRelease(pDevExt->EventSpinlock);
2197
2198 /*
2199 * Now deal with the return code.
2200 */
2201 if ( fResEvents
2202 && fResEvents != UINT32_MAX)
2203 {
2204 pInfo->u.Out.fEvents = fResEvents;
2205 if (fReqEvents & ~((uint32_t)1 << iEvent))
2206 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
2207 else
2208 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
2209 rc = VINF_SUCCESS;
2210 }
2211 else if ( fResEvents == UINT32_MAX
2212 || rc == VERR_INTERRUPTED)
2213 {
2214 rc = VERR_INTERRUPTED;
2215 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
2216 }
2217 else if (rc == VERR_TIMEOUT)
2218 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
2219 else
2220 {
2221 if (RT_SUCCESS(rc))
2222 {
2223 LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
2224 rc = VERR_INTERNAL_ERROR;
2225 }
2226 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
2227 }
2228
2229 return rc;
2230}
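
/*
 * Illustrative sketch (not part of the original file): a ring-3 wait loop
 * using the semantics implemented above.  The ioctl name and the event bit
 * are assumptions taken from the public headers; only the cMsTimeOut/fEvents
 * fields are from the code above.
 *
 *      VBGLIOCWAITFOREVENTS Wait;
 *      // ... initialize Wait.Hdr for VBGL_IOCTL_WAIT_FOR_EVENTS ...
 *      Wait.u.In.cMsTimeOut = 1000;           // 1 second; UINT32_MAX waits forever
 *      Wait.u.In.fEvents    = VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
 *      // ... issue the ioctl:
 *      //       VINF_SUCCESS     -> Wait.u.Out.fEvents holds the events that fired,
 *      //       VERR_TIMEOUT     -> nothing happened within cMsTimeOut,
 *      //       VERR_INTERRUPTED -> a signal arrived or the wait was cancelled. ...
 */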
2231
2232
2233/** @todo the semantics of this IoCtl have been tightened, so that no calls to
2234 * VBOXGUEST_IOCTL_WAITEVENT are allowed in a session after it has been
2235 * called. Change the code to make calls to VBOXGUEST_IOCTL_WAITEVENT made
2236 * after that to return VERR_INTERRUPTED or something appropriate. */
2237static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2238{
2239 PVBOXGUESTWAIT pWait;
2240 PVBOXGUESTWAIT pSafe;
2241 int rc = 0;
2242 /* Was at least one WAITEVENT in progress for this session? If not, we
2243 * set a flag so that the next call will be interrupted immediately. This
2244 * is needed so that a user thread can reliably interrupt another one in a
2245 * WAITEVENT loop. */
2246 bool fCancelledOne = false;
2247
2248 LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));
2249
2250 /*
2251 * Walk the event list and wake up anyone with a matching session.
2252 */
2253 RTSpinlockAcquire(pDevExt->EventSpinlock);
2254 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2255 {
2256 if (pWait->pSession == pSession)
2257 {
2258 fCancelledOne = true;
2259 pWait->fResEvents = UINT32_MAX;
2260 RTListNodeRemove(&pWait->ListNode);
2261#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2262 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2263#else
2264 rc |= RTSemEventMultiSignal(pWait->Event);
2265 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2266#endif
2267 }
2268 }
2269 if (!fCancelledOne)
2270 pSession->fPendingCancelWaitEvents = true;
2271 RTSpinlockRelease(pDevExt->EventSpinlock);
2272 Assert(rc == 0);
2273 NOREF(rc);
2274
2275#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2276 VGDrvCommonWaitDoWakeUps(pDevExt);
2277#endif
2278
2279 return VINF_SUCCESS;
2280}
2281
2282
2283/**
2284 * Checks if the VMM request is allowed in the context of the given session.
2285 *
2286 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
2287 * @param pDevExt The device extension.
2288 * @param pSession The calling session.
2289 * @param enmType The request type.
2290 * @param pReqHdr The request.
2291 */
2292static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
2293 VMMDevRequestHeader const *pReqHdr)
2294{
2295 /*
2296 * Categorize the request being made.
2297 */
2298 /** @todo This need quite some more work! */
2299 enum
2300 {
2301 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
2302 } enmRequired;
2303 RT_NOREF1(pDevExt);
2304
2305 switch (enmType)
2306 {
2307 /*
2308 * Deny access to anything we don't know or provide specialized I/O controls for.
2309 */
2310#ifdef VBOX_WITH_HGCM
2311 case VMMDevReq_HGCMConnect:
2312 case VMMDevReq_HGCMDisconnect:
2313# ifdef VBOX_WITH_64_BITS_GUESTS
2314 case VMMDevReq_HGCMCall32:
2315 case VMMDevReq_HGCMCall64:
2316# else
2317 case VMMDevReq_HGCMCall:
2318# endif /* VBOX_WITH_64_BITS_GUESTS */
2319 case VMMDevReq_HGCMCancel:
2320 case VMMDevReq_HGCMCancel2:
2321#endif /* VBOX_WITH_HGCM */
2322 case VMMDevReq_SetGuestCapabilities:
2323 default:
2324 enmRequired = kLevel_NoOne;
2325 break;
2326
2327 /*
2328 * There are a few things only this driver can do (and it doesn't use
2329 * the VMMRequest I/O control route anyway, but whatever).
2330 */
2331 case VMMDevReq_ReportGuestInfo:
2332 case VMMDevReq_ReportGuestInfo2:
2333 case VMMDevReq_GetHypervisorInfo:
2334 case VMMDevReq_SetHypervisorInfo:
2335 case VMMDevReq_RegisterPatchMemory:
2336 case VMMDevReq_DeregisterPatchMemory:
2337 case VMMDevReq_GetMemBalloonChangeRequest:
2338 enmRequired = kLevel_OnlyVBoxGuest;
2339 break;
2340
2341 /*
2342 * Trusted user apps only.
2343 */
2344 case VMMDevReq_QueryCredentials:
2345 case VMMDevReq_ReportCredentialsJudgement:
2346 case VMMDevReq_RegisterSharedModule:
2347 case VMMDevReq_UnregisterSharedModule:
2348 case VMMDevReq_WriteCoreDump:
2349 case VMMDevReq_GetCpuHotPlugRequest:
2350 case VMMDevReq_SetCpuHotPlugStatus:
2351 case VMMDevReq_CheckSharedModules:
2352 case VMMDevReq_GetPageSharingStatus:
2353 case VMMDevReq_DebugIsPageShared:
2354 case VMMDevReq_ReportGuestStats:
2355 case VMMDevReq_ReportGuestUserState:
2356 case VMMDevReq_GetStatisticsChangeRequest:
2357 case VMMDevReq_ChangeMemBalloon:
2358 enmRequired = kLevel_TrustedUsers;
2359 break;
2360
2361 /*
2362 * Anyone.
2363 */
2364 case VMMDevReq_GetMouseStatus:
2365 case VMMDevReq_SetMouseStatus:
2366 case VMMDevReq_SetPointerShape:
2367 case VMMDevReq_GetHostVersion:
2368 case VMMDevReq_Idle:
2369 case VMMDevReq_GetHostTime:
2370 case VMMDevReq_SetPowerStatus:
2371 case VMMDevReq_AcknowledgeEvents:
2372 case VMMDevReq_CtlGuestFilterMask:
2373 case VMMDevReq_ReportGuestStatus:
2374 case VMMDevReq_GetDisplayChangeRequest:
2375 case VMMDevReq_VideoModeSupported:
2376 case VMMDevReq_GetHeightReduction:
2377 case VMMDevReq_GetDisplayChangeRequest2:
2378 case VMMDevReq_VideoModeSupported2:
2379 case VMMDevReq_VideoAccelEnable:
2380 case VMMDevReq_VideoAccelFlush:
2381 case VMMDevReq_VideoSetVisibleRegion:
2382 case VMMDevReq_GetDisplayChangeRequestEx:
2383 case VMMDevReq_GetDisplayChangeRequestMulti:
2384 case VMMDevReq_GetSeamlessChangeRequest:
2385 case VMMDevReq_GetVRDPChangeRequest:
2386 case VMMDevReq_LogString:
2387 case VMMDevReq_GetSessionId:
2388 enmRequired = kLevel_AllUsers;
2389 break;
2390
2391 /*
2392 * Depends on the request parameters...
2393 */
2394 /** @todo this has to be changed into an I/O control and the facilities
2395 * tracked in the session so they can automatically be failed when the
2396 * session terminates without reporting the new status.
2397 *
2398 * The information presented by IGuest is not reliable without this! */
2399 case VMMDevReq_ReportGuestCapabilities:
2400 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
2401 {
2402 case VBoxGuestFacilityType_All:
2403 case VBoxGuestFacilityType_VBoxGuestDriver:
2404 enmRequired = kLevel_OnlyVBoxGuest;
2405 break;
2406 case VBoxGuestFacilityType_VBoxService:
2407 enmRequired = kLevel_TrustedUsers;
2408 break;
2409 case VBoxGuestFacilityType_VBoxTrayClient:
2410 case VBoxGuestFacilityType_Seamless:
2411 case VBoxGuestFacilityType_Graphics:
2412 default:
2413 enmRequired = kLevel_AllUsers;
2414 break;
2415 }
2416 break;
2417 }
2418
2419 /*
2420 * Check against the session.
2421 */
2422 switch (enmRequired)
2423 {
2424 default:
2425 case kLevel_NoOne:
2426 break;
2427 case kLevel_OnlyVBoxGuest:
2428 case kLevel_OnlyKernel:
2429 if (pSession->R0Process == NIL_RTR0PROCESS)
2430 return VINF_SUCCESS;
2431 break;
2432 case kLevel_TrustedUsers:
2433 if (pSession->fUserSession)
2434 break;
2435 case kLevel_AllUsers:
2436 return VINF_SUCCESS;
2437 }
2438
2439 return VERR_PERMISSION_DENIED;
2440}
2441
2442static int vgdrvIoCtl_VMMDevRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2443 VMMDevRequestHeader *pReqHdr, size_t cbData)
2444{
2445 int rc;
2446 VMMDevRequestHeader *pReqCopy;
2447
2448 /*
2449 * Validate the header and request size.
2450 */
2451 const VMMDevRequestType enmType = pReqHdr->requestType;
2452 const uint32_t cbReq = pReqHdr->size;
2453 const uint32_t cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);
2454
2455 LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));
2456
2457 if (cbReq < cbMinSize)
2458 {
2459 LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
2460 cbReq, cbMinSize, enmType));
2461 return VERR_INVALID_PARAMETER;
2462 }
2463 if (cbReq > cbData)
2464 {
2465 LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
2466 cbData, cbReq, enmType));
2467 return VERR_INVALID_PARAMETER;
2468 }
2469 rc = VbglGR0Verify(pReqHdr, cbData);
2470 if (RT_FAILURE(rc))
2471 {
2472 Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
2473 cbData, cbReq, enmType, rc));
2474 return rc;
2475 }
2476
2477 rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
2478 if (RT_FAILURE(rc))
2479 {
2480 Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
2481 return rc;
2482 }
2483
2484 /*
2485 * Make a copy of the request in the physical memory heap so
2486 * the VBoxGuestLibrary can more easily deal with the request.
2487 * (This is really a waste of time since the OS or the OS specific
2488 * code has already buffered or locked the input/output buffer, but
2489 * it does make things a bit simpler wrt the phys address.)
2490 */
2491 rc = VbglR0GRAlloc(&pReqCopy, cbReq, enmType);
2492 if (RT_FAILURE(rc))
2493 {
2494 Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2495 cbReq, cbReq, rc));
2496 return rc;
2497 }
2498 memcpy(pReqCopy, pReqHdr, cbReq);
2499 Assert(pReqCopy->reserved1 == cbReq);
2500 pReqCopy->reserved1 = 0; /* VGDrvCommonIoCtl or caller sets cbOut, so clear it. */
2501 pReqCopy->fRequestor = pSession->fRequestor;
2502
2503 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
2504 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
2505
2506 rc = VbglR0GRPerform(pReqCopy);
2507 if ( RT_SUCCESS(rc)
2508 && RT_SUCCESS(pReqCopy->rc))
2509 {
2510 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
2511 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
2512
2513 memcpy(pReqHdr, pReqCopy, cbReq);
2514 pReqHdr->reserved1 = cbReq; /* preserve cbOut */
2515 }
2516 else if (RT_FAILURE(rc))
2517 Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglR0GRPerform - rc=%Rrc!\n", rc));
2518 else
2519 {
2520 Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
2521 rc = pReqCopy->rc;
2522 }
2523
2524 VbglR0GRFree(pReqCopy);
2525 return rc;
2526}
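
/*
 * Illustrative sketch (not part of the original file): the kind of request a
 * client can push through the generic VMMDev request ioctl handled above.
 * VMMDevReqHostTime and the header fields are assumed from the VMMDev
 * headers; the ioctl plumbing itself is omitted.
 *
 *      VMMDevReqHostTime Req;
 *      RT_ZERO(Req);
 *      Req.header.size        = sizeof(Req);
 *      Req.header.version     = VMMDEV_REQUEST_HEADER_VERSION;
 *      Req.header.requestType = VMMDevReq_GetHostTime;
 *      Req.header.rc          = VERR_GENERAL_FAILURE;
 *      // ... hand &Req to the driver; on return Req.time holds the host time
 *      //     and Req.header.rc the VMMDev status for the request itself. ...
 */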
2527
2528
2529#ifdef VBOX_WITH_HGCM
2530
2531AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2532
2533/** Worker for vgdrvHgcmAsyncWaitCallback*. */
2534static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
2535 bool fInterruptible, uint32_t cMillies)
2536{
2537 int rc;
2538
2539 /*
2540 * Check to see if the condition was met by the time we got here.
2541 *
2542 * We create a simple poll loop here for dealing with out-of-memory
2543 * conditions since the caller isn't necessarily able to deal with
2544 * us returning too early.
2545 */
2546 PVBOXGUESTWAIT pWait;
2547 for (;;)
2548 {
2549 RTSpinlockAcquire(pDevExt->EventSpinlock);
2550 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2551 {
2552 RTSpinlockRelease(pDevExt->EventSpinlock);
2553 return VINF_SUCCESS;
2554 }
2555 RTSpinlockRelease(pDevExt->EventSpinlock);
2556
2557 pWait = vgdrvWaitAlloc(pDevExt, NULL);
2558 if (pWait)
2559 break;
2560 if (fInterruptible)
2561 return VERR_INTERRUPTED;
2562 RTThreadSleep(1);
2563 }
2564 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
2565 pWait->pHGCMReq = pHdr;
2566
2567 /*
2568 * Re-enter the spinlock and re-check for the condition.
2569 * If the condition is met, return.
2570 * Otherwise link us into the HGCM wait list and go to sleep.
2571 */
2572 RTSpinlockAcquire(pDevExt->EventSpinlock);
2573 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
2574 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2575 {
2576 vgdrvWaitFreeLocked(pDevExt, pWait);
2577 RTSpinlockRelease(pDevExt->EventSpinlock);
2578 return VINF_SUCCESS;
2579 }
2580 RTSpinlockRelease(pDevExt->EventSpinlock);
2581
2582 if (fInterruptible)
2583 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
2584 else
2585 rc = RTSemEventMultiWait(pWait->Event, cMillies);
2586 if (rc == VERR_SEM_DESTROYED)
2587 return rc;
2588
2589 /*
2590 * Unlink, free and return.
2591 */
2592 if ( RT_FAILURE(rc)
2593 && rc != VERR_TIMEOUT
2594 && ( !fInterruptible
2595 || rc != VERR_INTERRUPTED))
2596 LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));
2597
2598 vgdrvWaitFreeUnlocked(pDevExt, pWait);
2599 return rc;
2600}
2601
2602
2603/**
2604 * This is a callback for dealing with async waits.
2605 *
2606 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2607 */
2608static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2609{
2610 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2611 LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2612 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2613 false /* fInterruptible */, u32User /* cMillies */);
2614}
2615
2616
2617/**
2618 * This is a callback for dealing with interruptible async waits.
2619 *
2620 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2621 */
2622static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2623{
2624 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2625 LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2626 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2627 true /* fInterruptible */, u32User /* cMillies */);
2628}
2629
2630
2631static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCONNECT pInfo)
2632{
2633 int rc;
2634 HGCMCLIENTID idClient = 0;
2635
2636 /*
2637 * The VbglHGCMConnect call will invoke the callback if the HGCM
2638 * call is performed in an ASYNC fashion. The function is not able
2639 * to deal with cancelled requests.
2640 */
2641 Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
2642 pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost_Existing
2643 ? pInfo->u.In.Loc.u.host.achName : "<not local host>"));
2644
2645 rc = VbglR0HGCMInternalConnect(&pInfo->u.In.Loc, pSession->fRequestor, &idClient,
2646 vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2647 Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: idClient=%RX32 (rc=%Rrc)\n", idClient, rc));
2648 if (RT_SUCCESS(rc))
2649 {
2650 /*
2651 * Append the client id to the client id table.
2652 * If the table has somehow become filled up, we'll disconnect the session.
2653 */
2654 unsigned i;
2655 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2656 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2657 if (!pSession->aHGCMClientIds[i])
2658 {
2659 pSession->aHGCMClientIds[i] = idClient;
2660 break;
2661 }
2662 RTSpinlockRelease(pDevExt->SessionSpinlock);
2663 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2664 {
2665 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
2666 VbglR0HGCMInternalDisconnect(idClient, pSession->fRequestor, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2667
2668 pInfo->u.Out.idClient = 0;
2669 return VERR_TOO_MANY_OPEN_FILES;
2670 }
2671 }
2672 pInfo->u.Out.idClient = idClient;
2673 return rc;
2674}
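
/*
 * Illustrative sketch (not part of the original file): connecting to an HGCM
 * service through the path above.  The service name is just an example; the
 * location union and idClient are the fields this function actually touches.
 *
 *      VBGLIOCHGCMCONNECT Conn;
 *      // ... initialize Conn.Hdr for VBGL_IOCTL_HGCM_CONNECT ...
 *      RT_ZERO(Conn.u.In.Loc);
 *      Conn.u.In.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
 *      RTStrCopy(Conn.u.In.Loc.u.host.achName, sizeof(Conn.u.In.Loc.u.host.achName),
 *                "VBoxGuestPropSvc"); // example service
 *      // ... issue the ioctl; on success Conn.u.Out.idClient identifies the
 *      //     connection for HGCM calls and is cleaned up automatically when
 *      //     the session closes, or explicitly via VBGL_IOCTL_HGCM_DISCONNECT. ...
 */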
2675
2676
2677static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMDISCONNECT pInfo)
2678{
2679 /*
2680 * Validate the client id and invalidate its entry while we're in the call.
2681 */
2682 int rc;
2683 const uint32_t idClient = pInfo->u.In.idClient;
2684 unsigned i;
2685 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2686 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2687 if (pSession->aHGCMClientIds[i] == idClient)
2688 {
2689 pSession->aHGCMClientIds[i] = UINT32_MAX;
2690 break;
2691 }
2692 RTSpinlockRelease(pDevExt->SessionSpinlock);
2693 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2694 {
2695 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
2696 return VERR_INVALID_HANDLE;
2697 }
2698
2699 /*
2700 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
2701 * call is performed in an ASYNC fashion. The function is not able
2702 * to deal with cancelled requests.
2703 */
2704 Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
2705 rc = VbglR0HGCMInternalDisconnect(idClient, pSession->fRequestor, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2706 LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: rc=%Rrc\n", rc));
2707
2708 /* Update the client id array according to the result. */
2709 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2710 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2711 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) ? 0 : idClient;
2712 RTSpinlockRelease(pDevExt->SessionSpinlock);
2713
2714 return rc;
2715}
2716
2717
2718static int vgdrvIoCtl_HGCMCallInner(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
2719 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2720 size_t cbExtra, size_t cbData)
2721{
2722 const uint32_t u32ClientId = pInfo->u32ClientID;
2723 uint32_t fFlags;
2724 size_t cbActual;
2725 unsigned i;
2726 int rc;
2727
2728 /*
2729 * Some more validations.
2730 */
2731 if (RT_LIKELY(pInfo->cParms <= VMMDEV_MAX_HGCM_PARMS)) /* (Just make sure it doesn't overflow the next check.) */
2732 { /* likely */}
2733 else
2734 {
2735 LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
2736 return VERR_INVALID_PARAMETER;
2737 }
2738
2739 cbActual = cbExtra + sizeof(*pInfo);
2740#ifdef RT_ARCH_AMD64
2741 if (f32bit)
2742 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2743 else
2744#endif
2745 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2746 if (RT_LIKELY(cbData >= cbActual))
2747 { /* likely */}
2748 else
2749 {
2750 LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2751 cbData, cbData, cbActual, cbActual));
2752 return VERR_INVALID_PARAMETER;
2753 }
2754 pInfo->Hdr.cbOut = (uint32_t)cbActual;
2755
2756 /*
2757 * Validate the client id.
2758 */
2759 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2760 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2761 if (pSession->aHGCMClientIds[i] == u32ClientId)
2762 break;
2763 RTSpinlockRelease(pDevExt->SessionSpinlock);
2764 if (RT_LIKELY(i < RT_ELEMENTS(pSession->aHGCMClientIds)))
2765 { /* likely */}
2766 else
2767 {
2768 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2769 return VERR_INVALID_HANDLE;
2770 }
2771
2772 /*
2773 * The VbglHGCMCall call will invoke the callback if the HGCM
2774 * call is performed in an ASYNC fashion. This function can
2775 * deal with cancelled requests, so we let user mode requests
2776 * be interruptible (should add a flag for this later I guess).
2777 */
2778 LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2779 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2780 uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
2781#ifdef RT_ARCH_AMD64
2782 if (f32bit)
2783 {
2784 if (fInterruptible)
2785 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, pSession->fRequestor,
2786 vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2787 else
2788 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, pSession->fRequestor,
2789 vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
2790 }
2791 else
2792#endif
2793 {
2794 if (fInterruptible)
2795 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, pSession->fRequestor,
2796 vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2797 else
2798 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, pSession->fRequestor,
2799 vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
2800 }
2801 if (RT_SUCCESS(rc))
2802 {
2803 rc = pInfo->Hdr.rc;
2804 LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", rc));
2805 }
2806 else
2807 {
2808 if ( rc != VERR_INTERRUPTED
2809 && rc != VERR_TIMEOUT)
2810 LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
2811 else
2812 Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
2813 }
2814 return rc;
2815}
2816
2817
2818static int vgdrvIoCtl_HGCMCallWrapper(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
2819 bool f32bit, bool fUserData, size_t cbData)
2820{
2821 return vgdrvIoCtl_HGCMCallInner(pDevExt, pSession, pInfo, pInfo->cMsTimeout,
2822 pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2823 f32bit, fUserData, 0 /*cbExtra*/, cbData);
2824}
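
/*
 * Illustrative sketch (not part of the original file) of the variable sized
 * buffer the two functions above expect: the fixed VBGLIOCHGCMCALL part is
 * immediately followed by cParms HGCMFunctionParameter structures (the
 * 32-bit layout for 32-bit callers), which is exactly what the cbActual
 * check verifies.
 *
 *      size_t           cb    = sizeof(VBGLIOCHGCMCALL) + 2 * sizeof(HGCMFunctionParameter);
 *      PVBGLIOCHGCMCALL pCall = (PVBGLIOCHGCMCALL)RTMemAllocZ(cb);
 *      // ... initialize pCall->Hdr for VBGL_IOCTL_HGCM_CALL with size cb ...
 *      pCall->u32ClientID    = idClient;            // from the HGCM connect above
 *      pCall->cParms         = 2;
 *      pCall->cMsTimeout     = RT_INDEFINITE_WAIT;
 *      pCall->fInterruptible = true;
 *      // ... set the service function number, fill in the two parameters that
 *      //     follow the structure, then submit the ioctl ...
 */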
2825
2826
2827/**
2828 * Handles a fast HGCM call from another driver.
2829 *
2830 * The driver has provided a fully assembled HGCM call request and all we need
2831 * to do is send it to the host and do the wait processing.
2832 *
2833 * @returns VBox status code of the request submission part.
2834 * @param pDevExt The device extension.
2835 * @param pCallReq The call request.
2836 */
2837static int vgdrvIoCtl_HGCMFastCall(PVBOXGUESTDEVEXT pDevExt, VBGLIOCIDCHGCMFASTCALL volatile *pCallReq)
2838{
2839 VMMDevHGCMCall volatile *pHgcmCall = (VMMDevHGCMCall volatile *)(pCallReq + 1);
2840 int rc;
2841
2842 /*
2843 * Check out the physical address.
2844 */
2845 Assert((pCallReq->GCPhysReq & PAGE_OFFSET_MASK) == ((uintptr_t)pHgcmCall & PAGE_OFFSET_MASK));
2846
2847 AssertReturn(!pCallReq->fInterruptible, VERR_NOT_IMPLEMENTED);
2848
2849 /*
2850 * Submit the request.
2851 */
2852 Log(("vgdrvIoCtl_HGCMFastCall -> host\n"));
2853 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pCallReq->GCPhysReq);
2854
2855 /* Make the compiler aware that the host has changed memory. */
2856 ASMCompilerBarrier();
2857
2858 pCallReq->Hdr.rc = rc = pHgcmCall->header.header.rc;
2859 Log(("vgdrvIoCtl_HGCMFastCall -> %Rrc (header rc=%Rrc)\n", rc, pHgcmCall->header.result));
2860
2861 /*
2862 * The host is likely to engage in asynchronous execution of HGCM, unless it fails.
2863 */
2864 if (rc == VINF_HGCM_ASYNC_EXECUTE)
2865 {
2866 rc = vgdrvHgcmAsyncWaitCallbackWorker(&pHgcmCall->header, pDevExt, false /* fInterruptible */, RT_INDEFINITE_WAIT);
2867 if (pHgcmCall->header.fu32Flags & VBOX_HGCM_REQ_DONE)
2868 {
2869 Assert(!(pHgcmCall->header.fu32Flags & VBOX_HGCM_REQ_CANCELLED));
2870 rc = VINF_SUCCESS;
2871 }
2872 else
2873 {
2874 /*
2875 * Timeout and interrupt scenarios are messy and require
2876 * cancellation, so they are left for later.
2877 */
2878 AssertReleaseMsgFailed(("rc=%Rrc\n", rc));
2879 }
2880 }
2881 else
2882 Assert((pHgcmCall->header.fu32Flags & VBOX_HGCM_REQ_DONE) || RT_FAILURE_NP(rc));
2883
2884 Log(("vgdrvIoCtl_HGCMFastCall: rc=%Rrc result=%Rrc fu32Flags=%#x\n", rc, pHgcmCall->header.result, pHgcmCall->header.fu32Flags));
2885 return rc;
2886
2887}
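
/*
 * Illustrative note (not part of the original file) on the fast call layout
 * handled above: the VBGLIOCIDCHGCMFASTCALL header is immediately followed,
 * in the same physically contiguous allocation, by a fully constructed
 * VMMDevHGCMCall request, and GCPhysReq is the physical address of that
 * embedded request (hence the page offset assertion).  A caller does roughly:
 *
 *      // one allocation: | VBGLIOCIDCHGCMFASTCALL | VMMDevHGCMCall + params |
 *      VBGLIOCIDCHGCMFASTCALL *pFast     = ...;     // start of the block
 *      VMMDevHGCMCall         *pHgcmCall = (VMMDevHGCMCall *)(pFast + 1);
 *      // ... build the complete HGCM call in *pHgcmCall, store its physical
 *      //     address in pFast->GCPhysReq, leave pFast->fInterruptible clear
 *      //     (interruptible fast calls are not implemented), then submit the
 *      //     request through the IDC interface ...
 */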
2888
2889#endif /* VBOX_WITH_HGCM */
2890
2891/**
2892 * Handle VBGL_IOCTL_CHECK_BALLOON from R3.
2893 *
2894 * Ask the host for the size of the balloon and try to set it accordingly. If
2895 * this approach fails because it's not supported, return with fHandleInR3 set
2896 * and let the user land supply memory we can lock via the other ioctl.
2897 *
2898 * @returns VBox status code.
2899 *
2900 * @param pDevExt The device extension.
2901 * @param pSession The session.
2902 * @param pInfo The output buffer.
2903 */
2904static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHECKBALLOON pInfo)
2905{
2906 VMMDevGetMemBalloonChangeRequest *pReq;
2907 int rc;
2908
2909 LogFlow(("VBGL_IOCTL_CHECK_BALLOON:\n"));
2910 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2911 AssertRCReturn(rc, rc);
2912
2913 /*
2914 * The first user trying to query/change the balloon becomes the
2915 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
2916 */
2917 if ( pDevExt->MemBalloon.pOwner != pSession
2918 && pDevExt->MemBalloon.pOwner == NULL)
2919 pDevExt->MemBalloon.pOwner = pSession;
2920
2921 if (pDevExt->MemBalloon.pOwner == pSession)
2922 {
2923 /*
2924 * This is a response to the memory balloon change request event: by
2925 * acknowledging it we ask the host for the new balloon size and adjust
2926 * the guest memory balloon accordingly.
2927 */
2928 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2929 if (RT_SUCCESS(rc))
2930 {
2931 pReq->header.fRequestor = pSession->fRequestor;
2932 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2933 rc = VbglR0GRPerform(&pReq->header);
2934 if (RT_SUCCESS(rc))
2935 {
2936 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2937 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2938
2939 pInfo->u.Out.cBalloonChunks = pReq->cBalloonChunks;
2940 pInfo->u.Out.fHandleInR3 = false;
2941 pInfo->u.Out.afPadding[0] = false;
2942 pInfo->u.Out.afPadding[1] = false;
2943 pInfo->u.Out.afPadding[2] = false;
2944
2945 rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->u.Out.fHandleInR3);
2946 /* Ignore various out of memory failures. */
2947 if ( rc == VERR_NO_MEMORY
2948 || rc == VERR_NO_PHYS_MEMORY
2949 || rc == VERR_NO_CONT_MEMORY)
2950 rc = VINF_SUCCESS;
2951 }
2952 else
2953 LogRel(("VBGL_IOCTL_CHECK_BALLOON: VbglR0GRPerform failed. rc=%Rrc\n", rc));
2954 VbglR0GRFree(&pReq->header);
2955 }
2956 }
2957 else
2958 rc = VERR_PERMISSION_DENIED;
2959
2960 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2961 LogFlow(("VBGL_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
2962 return rc;
2963}
2964
2965
2966/**
2967 * Handle a request for changing the memory balloon.
2968 *
2969 * @returns VBox status code.
2970 *
2971 * @param pDevExt The device extension.
2972 * @param pSession The session.
2973 * @param pInfo The change request structure (input).
2974 */
2975static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEBALLOON pInfo)
2976{
2977 int rc;
2978 LogFlow(("VBGL_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%p\n", pInfo->u.In.fInflate, pInfo->u.In.pvChunk));
2979 if ( pInfo->u.In.abPadding[0]
2980 || pInfo->u.In.abPadding[1]
2981 || pInfo->u.In.abPadding[2]
2982 || pInfo->u.In.abPadding[3]
2983 || pInfo->u.In.abPadding[4]
2984 || pInfo->u.In.abPadding[5]
2985 || pInfo->u.In.abPadding[6]
2986#if ARCH_BITS == 32
2987 || pInfo->u.In.abPadding[7]
2988 || pInfo->u.In.abPadding[8]
2989 || pInfo->u.In.abPadding[9]
2990#endif
2991 )
2992 {
2993 Log(("VBGL_IOCTL_CHANGE_BALLOON: Padding isn't all zero: %.*Rhxs\n", sizeof(pInfo->u.In.abPadding), pInfo->u.In.abPadding));
2994 return VERR_INVALID_PARAMETER;
2995 }
2996
2997 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2998 AssertRCReturn(rc, rc);
2999
3000 if (!pDevExt->MemBalloon.fUseKernelAPI)
3001 {
3002 /*
3003 * The first user trying to query/change the balloon becomes the
3004 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
3005 */
3006 if ( pDevExt->MemBalloon.pOwner != pSession
3007 && pDevExt->MemBalloon.pOwner == NULL)
3008 pDevExt->MemBalloon.pOwner = pSession;
3009
3010 if (pDevExt->MemBalloon.pOwner == pSession)
3011 rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u.In.pvChunk, pInfo->u.In.fInflate != false);
3012 else
3013 rc = VERR_PERMISSION_DENIED;
3014 }
3015 else
3016 rc = VERR_PERMISSION_DENIED;
3017
3018 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
3019 return rc;
3020}
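
/*
 * Illustrative sketch (not part of the original file) of the two-ioctl
 * balloon flow implemented above: the owning ring-3 service asks the driver
 * to size the balloon, and only when fHandleInR3 comes back set does it
 * supply memory itself, one chunk per VBGL_IOCTL_CHANGE_BALLOON call.
 * Simplified: a real service tracks how many chunks are already ballooned.
 *
 *      VBGLIOCCHECKBALLOON Check;
 *      // ... initialize Check.Hdr for VBGL_IOCTL_CHECK_BALLOON and issue it ...
 *      if (Check.u.Out.fHandleInR3)
 *          for (uint32_t i = 0; i < Check.u.Out.cBalloonChunks; i++)
 *          {
 *              VBGLIOCCHANGEBALLOON Change;
 *              // ... initialize Change.Hdr for VBGL_IOCTL_CHANGE_BALLOON ...
 *              Change.u.In.pvChunk  = pvChunk;   // ring-3 chunk for the driver to lock
 *              Change.u.In.fInflate = true;
 *              // ... issue the ioctl ...
 *          }
 */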
3021
3022
3023/**
3024 * Handle a request for writing a core dump of the guest on the host.
3025 *
3026 * @returns VBox status code.
3027 *
3028 * @param pDevExt The device extension.
3029 * @param pSession The session.
3030 * @param pInfo The output buffer.
3031 */
3032static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCWRITECOREDUMP pInfo)
3033{
3034 VMMDevReqWriteCoreDump *pReq = NULL;
3035 int rc;
3036 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
3037 RT_NOREF1(pDevExt);
3038
3039 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
3040 if (RT_SUCCESS(rc))
3041 {
3042 pReq->header.fRequestor = pSession->fRequestor;
3043 pReq->fFlags = pInfo->u.In.fFlags;
3044 rc = VbglR0GRPerform(&pReq->header);
3045 if (RT_FAILURE(rc))
3046 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglR0GRPerform failed, rc=%Rrc!\n", rc));
3047
3048 VbglR0GRFree(&pReq->header);
3049 }
3050 else
3051 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
3052 sizeof(*pReq), sizeof(*pReq), rc));
3053 return rc;
3054}
3055
3056
3057/**
3058 * Guest backdoor logging.
3059 *
3060 * @returns VBox status code.
3061 *
3062 * @param pDevExt The device extension.
3063 * @param pch The log message (need not be NULL terminated).
3064 * @param cbData Size of the buffer.
3065 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
3066 * call. True for a normal user, false for the root user.
3067 */
3068static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, bool fUserSession)
3069{
3070 if (pDevExt->fLoggingEnabled)
3071 RTLogBackdoorPrintf("%.*s", cbData, pch);
3072 else if (!fUserSession)
3073 LogRel(("%.*s", cbData, pch));
3074 else
3075 Log(("%.*s", cbData, pch));
3076 return VINF_SUCCESS;
3077}
3078
3079
3080/** @name Guest Capabilities, Mouse Status and Event Filter
3081 * @{
3082 */
3083
3084/**
3085 * Clears a bit usage tracker (init time).
3086 *
3087 * @param pTracker The tracker to clear.
3088 */
3089static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
3090{
3091 uint32_t iBit;
3092 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3093
3094 for (iBit = 0; iBit < 32; iBit++)
3095 pTracker->acPerBitUsage[iBit] = 0;
3096 pTracker->fMask = 0;
3097}
3098
3099
3100#ifdef VBOX_STRICT
3101/**
3102 * Checks that pTracker->fMask is correct and that the usage values are within
3103 * the valid range.
3104 *
3105 * @param pTracker The tracker.
3106 * @param cMax Max valid usage value.
3107 * @param pszWhat Identifies the tracker in assertions.
3108 */
3109static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
3110{
3111 uint32_t fMask = 0;
3112 uint32_t iBit;
3113 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3114
3115 for (iBit = 0; iBit < 32; iBit++)
3116 if (pTracker->acPerBitUsage[iBit])
3117 {
3118 fMask |= RT_BIT_32(iBit);
3119 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
3120 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3121 }
3122
3123 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
3124}
3125#endif
3126
3127
3128/**
3129 * Applies a change to the bit usage tracker.
3130 *
3131 *
3132 * @returns true if the mask changed, false if not.
3133 * @param pTracker The bit usage tracker.
3134 * @param fChanged The bits to change.
3135 * @param fPrevious The previous value of the bits.
3136 * @param cMax The max valid usage value for assertions.
3137 * @param pszWhat Identifies the tracker in assertions.
3138 */
3139static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
3140 uint32_t cMax, const char *pszWhat)
3141{
3142 bool fGlobalChange = false;
3143 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
3144
3145 while (fChanged)
3146 {
3147 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
3148 uint32_t const fBitMask = RT_BIT_32(iBit);
3149 Assert(iBit < 32); Assert(fBitMask & fChanged);
3150
3151 if (fBitMask & fPrevious)
3152 {
3153 pTracker->acPerBitUsage[iBit] -= 1;
3154 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
3155 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3156 if (pTracker->acPerBitUsage[iBit] == 0)
3157 {
3158 fGlobalChange = true;
3159 pTracker->fMask &= ~fBitMask;
3160 }
3161 }
3162 else
3163 {
3164 pTracker->acPerBitUsage[iBit] += 1;
3165 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
3166 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
3167 if (pTracker->acPerBitUsage[iBit] == 1)
3168 {
3169 fGlobalChange = true;
3170 pTracker->fMask |= fBitMask;
3171 }
3172 }
3173
3174 fChanged &= ~fBitMask;
3175 }
3176
3177#ifdef VBOX_STRICT
3178 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
3179#endif
3180 NOREF(pszWhat); NOREF(cMax);
3181 return fGlobalChange;
3182}
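
/*
 * Worked example (added for illustration) of the reference counting above,
 * assuming two sessions and bit 0:
 *
 *   - Session A sets bit 0:    acPerBitUsage[0] goes 0 -> 1, the bit is added
 *                              to fMask and the function returns true.
 *   - Session B sets bit 0:    acPerBitUsage[0] goes 1 -> 2, fMask is already
 *                              set, returns false (no host update needed).
 *   - Session A clears bit 0:  acPerBitUsage[0] goes 2 -> 1, still no global
 *                              change.
 *   - Session B clears bit 0:  acPerBitUsage[0] goes 1 -> 0, the bit leaves
 *                              fMask and the function returns true, so the
 *                              caller refreshes the host side mask.
 */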
3183
3184
3185/**
3186 * Init and termination worker for resetting the event filter on the host.
3187 *
3188 * @returns VBox status code.
3189 * @param pDevExt The device extension.
3190 * @param fFixedEvents Fixed events (init time).
3191 */
3192static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
3193{
3194 VMMDevCtlGuestFilterMask *pReq;
3195 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
3196 if (RT_SUCCESS(rc))
3197 {
3198 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
3199 pReq->u32OrMask = fFixedEvents;
3200 rc = VbglR0GRPerform(&pReq->header);
3201 if (RT_FAILURE(rc))
3202 LogRelFunc(("failed with rc=%Rrc\n", rc));
3203 VbglR0GRFree(&pReq->header);
3204 }
3205 RT_NOREF1(pDevExt);
3206 return rc;
3207}
3208
3209
3210/**
3211 * Changes the event filter mask for the given session.
3212 *
3213 * This is called in response to VBGL_IOCTL_CHANGE_FILTER_MASK as well as to do
3214 * session cleanup.
3215 *
3216 * @returns VBox status code.
3217 * @param pDevExt The device extension.
3218 * @param pSession The session.
3219 * @param fOrMask The events to add.
3220 * @param fNotMask The events to remove.
3221 * @param fSessionTermination Set if we're called by the session cleanup code.
3222 * This tweaks the error handling so we perform
3223 * proper session cleanup even if the host
3224 * misbehaves.
3225 *
3226 * @remarks Takes the session spinlock.
3227 */
3228static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3229 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
3230{
3231 VMMDevCtlGuestFilterMask *pReq;
3232 uint32_t fChanged;
3233 uint32_t fPrevious;
3234 int rc;
3235
3236 /*
3237 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3238 */
3239 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
3240 if (RT_SUCCESS(rc))
3241 { /* nothing */ }
3242 else if (!fSessionTermination)
3243 {
3244 LogRel(("vgdrvSetSessionFilterMask: VbglR0GRAlloc failure: %Rrc\n", rc));
3245 return rc;
3246 }
3247 else
3248 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3249
3250
3251 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3252
3253 /*
3254 * Apply the changes to the session mask.
3255 */
3256 fPrevious = pSession->fEventFilter;
3257 pSession->fEventFilter |= fOrMask;
3258 pSession->fEventFilter &= ~fNotMask;
3259
3260 /*
3261 * If anything actually changed, update the global usage counters.
3262 */
3263 fChanged = fPrevious ^ pSession->fEventFilter;
3264 LogFlow(("vgdrvSetSessionEventFilter: Session->fEventFilter: %#x -> %#x (changed %#x)\n",
3265 fPrevious, pSession->fEventFilter, fChanged));
3266 if (fChanged)
3267 {
3268 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
3269 pDevExt->cSessions, "EventFilterTracker");
3270
3271 /*
3272 * If there are global changes, update the event filter on the host.
3273 */
3274 if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
3275 {
3276 Assert(pReq || fSessionTermination);
3277 if (pReq)
3278 {
3279 pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
3280 if (pReq->u32OrMask == pDevExt->fEventFilterHost)
3281 rc = VINF_SUCCESS;
3282 else
3283 {
3284 pDevExt->fEventFilterHost = pReq->u32OrMask;
3285 pReq->u32NotMask = ~pReq->u32OrMask;
3286 rc = VbglR0GRPerform(&pReq->header);
3287 if (RT_FAILURE(rc))
3288 {
3289 /*
3290 * Failed, roll back (unless it's session termination time).
3291 */
3292 pDevExt->fEventFilterHost = UINT32_MAX;
3293 if (!fSessionTermination)
3294 {
3295 vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
3296 pDevExt->cSessions, "EventFilterTracker");
3297 pSession->fEventFilter = fPrevious;
3298 }
3299 }
3300 }
3301 }
3302 else
3303 rc = VINF_SUCCESS;
3304 }
3305 }
3306
3307 RTSpinlockRelease(pDevExt->SessionSpinlock);
3308 if (pReq)
3309 VbglR0GRFree(&pReq->header);
3310 return rc;
3311}
3312
3313
3314/**
3315 * Handle VBGL_IOCTL_CHANGE_FILTER_MASK.
3316 *
3317 * @returns VBox status code.
3318 *
3319 * @param pDevExt The device extension.
3320 * @param pSession The session.
3321 * @param pInfo The request.
3322 */
3323static int vgdrvIoCtl_ChangeFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEFILTERMASK pInfo)
3324{
3325 LogFlow(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3326
3327 if ((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
3328 {
3329 Log(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3330 return VERR_INVALID_PARAMETER;
3331 }
3332
3333 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask, false /*fSessionTermination*/);
3334}
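
/*
 * Usage sketch: how a caller could fill in a VBGL_IOCTL_CHANGE_FILTER_MASK
 * request for the common dispatcher (VGDrvCommonIoCtl) further down.  This is
 * only a sketch: the VBGLIOCCHANGEFILTERMASK layout (a VBGLREQHDR member
 * assumed to be named 'Hdr', followed by the u.In fields used above) is taken
 * from VBoxGuest.h, and pDevExt/pSession are assumed to come from the platform
 * specific open code.
 *
 *      VBGLIOCCHANGEFILTERMASK Req;
 *      int rc;
 *      RT_ZERO(Req);
 *      Req.Hdr.cbIn      = VBGL_IOCTL_CHANGE_FILTER_MASK_SIZE_IN;
 *      Req.Hdr.cbOut     = VBGL_IOCTL_CHANGE_FILTER_MASK_SIZE_OUT;
 *      Req.Hdr.uVersion  = VBGLREQHDR_VERSION;
 *      Req.Hdr.uType     = VBGLREQHDR_TYPE_DEFAULT;
 *      Req.u.In.fOrMask  = VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
 *      Req.u.In.fNotMask = 0;
 *      rc = VGDrvCommonIoCtl(VBGL_IOCTL_CHANGE_FILTER_MASK, pDevExt, pSession, &Req.Hdr, sizeof(Req));
 */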
3335
3336
3337/**
 3338 * Init and termination worker that sets the mouse feature status to zero on the host.
3339 *
3340 * @returns VBox status code.
3341 * @param pDevExt The device extension.
3342 */
3343static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
3344{
3345 VMMDevReqMouseStatus *pReq;
3346 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
3347 if (RT_SUCCESS(rc))
3348 {
3349 pReq->mouseFeatures = 0;
3350 pReq->pointerXPos = 0;
3351 pReq->pointerYPos = 0;
3352 rc = VbglR0GRPerform(&pReq->header);
3353 if (RT_FAILURE(rc))
3354 LogRelFunc(("failed with rc=%Rrc\n", rc));
3355 VbglR0GRFree(&pReq->header);
3356 }
3357 RT_NOREF1(pDevExt);
3358 return rc;
3359}
3360
3361
3362/**
3363 * Changes the mouse status mask for the given session.
3364 *
3365 * This is called in response to VBOXGUEST_IOCTL_SET_MOUSE_STATUS as well as to
3366 * do session cleanup.
3367 *
3368 * @returns VBox status code.
3369 * @param pDevExt The device extension.
3370 * @param pSession The session.
3371 * @param fOrMask The status flags to add.
3372 * @param fNotMask The status flags to remove.
3373 * @param fSessionTermination Set if we're called by the session cleanup code.
3374 * This tweaks the error handling so we perform
3375 * proper session cleanup even if the host
3376 * misbehaves.
3377 *
3378 * @remarks Takes the session spinlock.
3379 */
3380static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3381 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
3382{
3383 VMMDevReqMouseStatus *pReq;
3384 uint32_t fChanged;
3385 uint32_t fPrevious;
3386 int rc;
3387
3388 /*
3389 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3390 */
3391 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
3392 if (RT_SUCCESS(rc))
3393 {
3394 if (!fSessionTermination)
3395 pReq->header.fRequestor = pSession->fRequestor;
3396 }
3397 else if (!fSessionTermination)
3398 {
3399 LogRel(("vgdrvSetSessionMouseStatus: VbglR0GRAlloc failure: %Rrc\n", rc));
3400 return rc;
3401 }
3402 else
3403 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3404
3405
3406 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3407
3408 /*
3409 * Apply the changes to the session mask.
3410 */
3411 fPrevious = pSession->fMouseStatus;
3412 pSession->fMouseStatus |= fOrMask;
3413 pSession->fMouseStatus &= ~fNotMask;
3414
3415 /*
3416 * If anything actually changed, update the global usage counters.
3417 */
3418 fChanged = fPrevious ^ pSession->fMouseStatus;
3419 if (fChanged)
3420 {
3421 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
3422 pDevExt->cSessions, "MouseStatusTracker");
3423
3424 /*
 3425 * If there are global changes, update the mouse status on the host.
3426 */
3427 if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
3428 {
3429 Assert(pReq || fSessionTermination);
3430 if (pReq)
3431 {
3432 pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
3433 if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
3434 rc = VINF_SUCCESS;
3435 else
3436 {
3437 pDevExt->fMouseStatusHost = pReq->mouseFeatures;
3438 pReq->pointerXPos = 0;
3439 pReq->pointerYPos = 0;
3440 rc = VbglR0GRPerform(&pReq->header);
3441 if (RT_FAILURE(rc))
3442 {
3443 /*
3444 * Failed, roll back (unless it's session termination time).
3445 */
3446 pDevExt->fMouseStatusHost = UINT32_MAX;
3447 if (!fSessionTermination)
3448 {
3449 vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
3450 pDevExt->cSessions, "MouseStatusTracker");
3451 pSession->fMouseStatus = fPrevious;
3452 }
3453 }
3454 }
3455 }
3456 else
3457 rc = VINF_SUCCESS;
3458 }
3459 }
3460
3461 RTSpinlockRelease(pDevExt->SessionSpinlock);
3462 if (pReq)
3463 VbglR0GRFree(&pReq->header);
3464 return rc;
3465}
3466
3467
3468/**
3469 * Sets the mouse status features for this session and updates them globally.
3470 *
3471 * @returns VBox status code.
3472 *
 3473 * @param pDevExt The device extension.
3474 * @param pSession The session.
3475 * @param fFeatures New bitmap of enabled features.
3476 */
3477static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
3478{
3479 LogFlow(("VBGL_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
3480
3481 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
3482 return VERR_INVALID_PARAMETER;
3483
3484 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
3485}
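
/*
 * Worked example for the (fFeatures, ~fFeatures) call above: passing the
 * complement as fNotMask makes the session mask become exactly fFeatures.
 * E.g. with a previous fMouseStatus of 0x5 and fFeatures = 0x3:
 *
 *      fMouseStatus = (0x5 | 0x3) & ~(~0x3) = 0x7 & 0x3 = 0x3
 */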
3486
3487
3488/**
 3489 * Returns the mask of VMM device events that this session is allowed to see
 3490 * (wrt "acquire" mode guest capabilities).
3491 *
3492 * The events associated with guest capabilities in "acquire" mode will be
 3493 * restricted to sessions which have acquired the respective capabilities.
3494 * If someone else tries to wait for acquired events, they won't be woken up
3495 * when the event becomes pending. Should some other thread in the session
3496 * acquire the capability while the corresponding event is pending, the waiting
 3497 * thread will be woken up.
3498 *
3499 * @returns Mask of events valid for the given session.
3500 * @param pDevExt The device extension.
3501 * @param pSession The session.
3502 *
 3503 * @remarks Only needs to be called when dispatching events in the
3504 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
3505 */
3506static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
3507{
3508 uint32_t fAcquireModeGuestCaps;
3509 uint32_t fAcquiredGuestCaps;
3510 uint32_t fAllowedEvents;
3511
3512 /*
3513 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
3514 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
3515 */
3516 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
3517 if (fAcquireModeGuestCaps == 0)
3518 return VMMDEV_EVENT_VALID_EVENT_MASK;
3519 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
3520
3521 /*
3522 * Calculate which events to allow according to the cap config and caps
3523 * acquired by the session.
3524 */
3525 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
3526 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
3527 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
3528 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
3529
3530 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3531 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
3532 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3533
3534 return fAllowedEvents;
3535}
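
/*
 * Example: if VMMDEV_GUEST_SUPPORTS_GRAPHICS is in "acquire" mode but this
 * session has not acquired it, the function returns
 * VMMDEV_EVENT_VALID_EVENT_MASK & ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST, so the
 * session will not be woken up for display change requests.
 */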
3536
3537
3538/**
 3539 * Init and termination worker that sets the guest capabilities to zero on the host.
3540 *
3541 * @returns VBox status code.
3542 * @param pDevExt The device extension.
3543 */
3544static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
3545{
3546 VMMDevReqGuestCapabilities2 *pReq;
3547 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3548 if (RT_SUCCESS(rc))
3549 {
3550 pReq->u32NotMask = UINT32_MAX;
3551 pReq->u32OrMask = 0;
3552 rc = VbglR0GRPerform(&pReq->header);
3553
3554 if (RT_FAILURE(rc))
3555 LogRelFunc(("failed with rc=%Rrc\n", rc));
3556 VbglR0GRFree(&pReq->header);
3557 }
3558 RT_NOREF1(pDevExt);
3559 return rc;
3560}
3561
3562
3563/**
3564 * Sets the guest capabilities to the host while holding the lock.
3565 *
3566 * This will ASSUME that we're the ones in charge of the mask, so
3567 * we'll simply clear all bits we don't set.
3568 *
3569 * @returns VBox status code.
3570 * @param pDevExt The device extension.
3571 * @param pReq The request.
3572 */
3573static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
3574{
3575 int rc;
3576
3577 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3578 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
3579 rc = VINF_SUCCESS;
3580 else
3581 {
3582 pDevExt->fGuestCapsHost = pReq->u32OrMask;
3583 pReq->u32NotMask = ~pReq->u32OrMask;
3584 rc = VbglR0GRPerform(&pReq->header);
3585 if (RT_FAILURE(rc))
3586 pDevExt->fGuestCapsHost = UINT32_MAX;
3587 }
3588
3589 return rc;
3590}
3591
3592
3593/**
 3594 * Switches a set of capabilities into "acquire" mode and (maybe) acquires them for
3595 * the given session.
3596 *
3597 * This is called in response to VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE as well as
3598 * to do session cleanup.
3599 *
3600 * @returns VBox status code.
3601 * @param pDevExt The device extension.
3602 * @param pSession The session.
 3603 * @param fOrMask The capabilities to add.
 3604 * @param fNotMask The capabilities to remove. Ignored when
 3605 * VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE is set.
 3606 * @param fFlags Operation modifier.
 3607 * Zero (no flags) means to both configure and
 3608 * acquire/release the capabilities.
 3609 * VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE means to
 3610 * only configure the capabilities in @a fOrMask
 3611 * for "acquire" mode.
3612 * @param fSessionTermination Set if we're called by the session cleanup code.
3613 * This tweaks the error handling so we perform
3614 * proper session cleanup even if the host
3615 * misbehaves.
3616 *
3617 * @remarks Takes both the session and event spinlocks.
3618 */
3619static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3620 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags,
3621 bool fSessionTermination)
3622{
3623 uint32_t fCurrentOwnedCaps;
3624 uint32_t fSessionRemovedCaps;
3625 uint32_t fSessionAddedCaps;
3626 uint32_t fOtherConflictingCaps;
3627 VMMDevReqGuestCapabilities2 *pReq = NULL;
3628 int rc;
3629
3630
3631 /*
3632 * Validate and adjust input.
3633 */
3634 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3635 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3636 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3637 {
3638 LogRel(("vgdrvAcquireSessionCapabilities: invalid fOrMask=%#x (pSession=%p fNotMask=%#x fFlags=%#x)\n",
3639 fOrMask, pSession, fNotMask, fFlags));
3640 return VERR_INVALID_PARAMETER;
3641 }
3642
3643 if ((fFlags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) != 0)
3644 {
3645 LogRel(("vgdrvAcquireSessionCapabilities: invalid fFlags=%#x (pSession=%p fOrMask=%#x fNotMask=%#x)\n",
3646 fFlags, pSession, fOrMask, fNotMask));
3647 return VERR_INVALID_PARAMETER;
3648 }
3649 Assert(!fOrMask || !fSessionTermination);
3650
 3651 /* The fNotMask need not contain only valid bits; invalid ones are simply ignored. */
3652 fNotMask &= ~fOrMask;
3653
3654 /*
 3655 * Preallocate an update request if we're about to do more than just configure
3656 * the capability mode.
3657 */
3658 if (!(fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE))
3659 {
3660 rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3661 if (RT_SUCCESS(rc))
3662 {
3663 if (!fSessionTermination)
3664 pReq->header.fRequestor = pSession->fRequestor;
3665 }
3666 else if (!fSessionTermination)
3667 {
3668 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: VbglR0GRAlloc failure: %Rrc\n",
3669 pSession, fOrMask, fNotMask, fFlags, rc));
3670 return rc;
3671 }
3672 else
3673 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3674 }
3675
3676 /*
 3677 * Try to switch the capabilities in the OR mask into "acquire" mode.
3678 *
 3679 * Note! We currently ignore anyone who may already have "set" the capabilities
3680 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3681 */
3682 RTSpinlockAcquire(pDevExt->EventSpinlock);
3683
3684 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3685 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3686 else
3687 {
3688 RTSpinlockRelease(pDevExt->EventSpinlock);
3689
3690 if (pReq)
3691 VbglR0GRFree(&pReq->header);
3692 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3693 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: calling caps acquire for set caps\n",
3694 pSession, fOrMask, fNotMask, fFlags));
3695 return VERR_INVALID_STATE;
3696 }
3697
3698 /*
3699 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3700 */
3701 if (fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
3702 {
3703 RTSpinlockRelease(pDevExt->EventSpinlock);
3704
3705 Assert(!pReq);
3706 Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: configured acquire caps: 0x%x\n",
3707 pSession, fOrMask, fNotMask, fFlags));
3708 return VINF_SUCCESS;
3709 }
3710 Assert(pReq || fSessionTermination);
3711
3712 /*
3713 * Caller wants to acquire/release the capabilities too.
3714 *
3715 * Note! The mode change of the capabilities above won't be reverted on
3716 * failure, this is intentional.
3717 */
3718 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3719 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3720 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3721 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3722 fOtherConflictingCaps &= fSessionAddedCaps;
3723
3724 if (!fOtherConflictingCaps)
3725 {
3726 if (fSessionAddedCaps)
3727 {
3728 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3729 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3730 }
3731
3732 if (fSessionRemovedCaps)
3733 {
3734 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3735 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3736 }
3737
3738 /*
3739 * If something changes (which is very likely), tell the host.
3740 */
3741 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3742 {
3743 Assert(pReq || fSessionTermination);
3744 if (pReq)
3745 {
3746 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3747 if (RT_FAILURE(rc) && !fSessionTermination)
3748 {
3749 /* Failed, roll back. */
3750 if (fSessionAddedCaps)
3751 {
3752 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3753 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3754 }
3755 if (fSessionRemovedCaps)
3756 {
3757 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3758 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3759 }
3760
3761 RTSpinlockRelease(pDevExt->EventSpinlock);
3762 LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3763 VbglR0GRFree(&pReq->header);
3764 return rc;
3765 }
3766 }
3767 }
3768 }
3769 else
3770 {
3771 RTSpinlockRelease(pDevExt->EventSpinlock);
3772
3773 Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3774 VbglR0GRFree(&pReq->header);
3775 return VERR_RESOURCE_BUSY;
3776 }
3777
3778 RTSpinlockRelease(pDevExt->EventSpinlock);
3779 if (pReq)
3780 VbglR0GRFree(&pReq->header);
3781
3782 /*
3783 * If we added a capability, check if that means some other thread in our
3784 * session should be unblocked because there are events pending.
3785 *
3786 * HACK ALERT! When the seamless support capability is added we generate a
3787 * seamless change event so that the ring-3 client can sync with
 3788 * the seamless state. Although this introduces spurious
 3789 * wakeups of the ring-3 client, it solves the problem of client
 3790 * state inconsistency in a multiuser environment (on Windows).
3791 */
3792 if (fSessionAddedCaps)
3793 {
3794 uint32_t fGenFakeEvents = 0;
3795 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3796 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3797
3798 RTSpinlockAcquire(pDevExt->EventSpinlock);
3799 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3800 vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
3801 RTSpinlockRelease(pDevExt->EventSpinlock);
3802
3803#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3804 VGDrvCommonWaitDoWakeUps(pDevExt);
3805#endif
3806 }
3807
3808 return VINF_SUCCESS;
3809}
3810
3811
3812/**
3813 * Handle VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES.
3814 *
3815 * @returns VBox status code.
3816 *
3817 * @param pDevExt The device extension.
3818 * @param pSession The session.
3819 * @param pAcquire The request.
3820 */
3821static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCACQUIREGUESTCAPS pAcquire)
3822{
3823 int rc;
3824 LogFlow(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES: or=%#x not=%#x flags=%#x\n",
3825 pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask, pAcquire->u.In.fFlags));
3826
3827 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask,
3828 pAcquire->u.In.fFlags, false /*fSessionTermination*/);
3829 if (RT_FAILURE(rc))
3830 LogRel(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES failed rc=%Rrc\n", rc));
3831 return rc;
3832}
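
/*
 * Usage sketch for the acquire interface (a sketch only: the
 * VBGLIOCACQUIREGUESTCAPS layout with a 'Hdr' member is assumed from
 * VBoxGuest.h, and the header is initialized as in the
 * VBGL_IOCTL_CHANGE_FILTER_MASK example above).  With fFlags = 0 the call both
 * switches the capability into "acquire" mode and acquires it for the session;
 * releasing it later is done by listing it in fNotMask instead.
 *
 *      VBGLIOCACQUIREGUESTCAPS Req;
 *      int rc;
 *      RT_ZERO(Req);
 *      Req.Hdr.cbIn      = VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES_SIZE_IN;
 *      Req.Hdr.cbOut     = VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES_SIZE_OUT;
 *      Req.Hdr.uVersion  = VBGLREQHDR_VERSION;
 *      Req.Hdr.uType     = VBGLREQHDR_TYPE_DEFAULT;
 *      Req.u.In.fOrMask  = VMMDEV_GUEST_SUPPORTS_GRAPHICS;
 *      Req.u.In.fNotMask = 0;
 *      Req.u.In.fFlags   = 0;
 *      rc = VGDrvCommonIoCtl(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES, pDevExt, pSession, &Req.Hdr, sizeof(Req));
 */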
3833
3834
3835/**
3836 * Sets the guest capabilities for a session.
3837 *
3838 * @returns VBox status code.
3839 * @param pDevExt The device extension.
3840 * @param pSession The session.
3841 * @param fOrMask The capabilities to add.
3842 * @param fNotMask The capabilities to remove.
3843 * @param pfSessionCaps Where to return the guest capabilities reported
3844 * for this session. Optional.
3845 * @param pfGlobalCaps Where to return the guest capabilities reported
3846 * for all the sessions. Optional.
3847 *
3848 * @param fSessionTermination Set if we're called by the session cleanup code.
3849 * This tweaks the error handling so we perform
3850 * proper session cleanup even if the host
3851 * misbehaves.
3852 *
3853 * @remarks Takes the session spinlock.
3854 */
3855static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3856 uint32_t fOrMask, uint32_t fNotMask, uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps,
3857 bool fSessionTermination)
3858{
3859 /*
3860 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
3861 */
3862 VMMDevReqGuestCapabilities2 *pReq;
3863 int rc = VbglR0GRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3864 if (RT_SUCCESS(rc))
3865 {
3866 if (!fSessionTermination)
3867 pReq->header.fRequestor = pSession->fRequestor;
3868 }
3869 else if (!fSessionTermination)
3870 {
3871 if (pfSessionCaps)
3872 *pfSessionCaps = UINT32_MAX;
3873 if (pfGlobalCaps)
3874 *pfGlobalCaps = UINT32_MAX;
3875 LogRel(("vgdrvSetSessionCapabilities: VbglR0GRAlloc failure: %Rrc\n", rc));
3876 return rc;
3877 }
3878 else
3879 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3880
3881
3882 RTSpinlockAcquire(pDevExt->SessionSpinlock);
3883
3884#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3885 /*
3886 * Capabilities in "acquire" mode cannot be set via this API.
 3887 * (Acquire mode is only used on Windows at the time of writing.)
3888 */
3889 if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
3890#endif
3891 {
3892 /*
3893 * Apply the changes to the session mask.
3894 */
3895 uint32_t fChanged;
3896 uint32_t fPrevious = pSession->fCapabilities;
3897 pSession->fCapabilities |= fOrMask;
3898 pSession->fCapabilities &= ~fNotMask;
3899
3900 /*
3901 * If anything actually changed, update the global usage counters.
3902 */
3903 fChanged = fPrevious ^ pSession->fCapabilities;
3904 if (fChanged)
3905 {
3906 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
3907 pDevExt->cSessions, "SetGuestCapsTracker");
3908
3909 /*
3910 * If there are global changes, update the capabilities on the host.
3911 */
3912 if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
3913 {
3914 Assert(pReq || fSessionTermination);
3915 if (pReq)
3916 {
3917 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3918
3919 /* On failure, roll back (unless it's session termination time). */
3920 if (RT_FAILURE(rc) && !fSessionTermination)
3921 {
3922 vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
3923 pDevExt->cSessions, "SetGuestCapsTracker");
3924 pSession->fCapabilities = fPrevious;
3925 }
3926 }
3927 }
3928 }
3929 }
3930#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
3931 else
3932 rc = VERR_RESOURCE_BUSY;
3933#endif
3934
3935 if (pfSessionCaps)
3936 *pfSessionCaps = pSession->fCapabilities;
3937 if (pfGlobalCaps)
3938 *pfGlobalCaps = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3939
3940 RTSpinlockRelease(pDevExt->SessionSpinlock);
3941 if (pReq)
3942 VbglR0GRFree(&pReq->header);
3943 return rc;
3944}
3945
3946
3947/**
3948 * Handle VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES.
3949 *
3950 * @returns VBox status code.
3951 *
3952 * @param pDevExt The device extension.
3953 * @param pSession The session.
3954 * @param pInfo The request.
3955 */
3956static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCSETGUESTCAPS pInfo)
3957{
3958 int rc;
3959 LogFlow(("VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3960
3961 if (!((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3962 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask,
3963 &pInfo->u.Out.fSessionCaps, &pInfo->u.Out.fGlobalCaps, false /*fSessionTermination*/);
3964 else
3965 rc = VERR_INVALID_PARAMETER;
3966
3967 return rc;
3968}
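
/*
 * Usage sketch (same assumptions as the earlier request examples; the
 * VBGLIOCSETGUESTCAPS layout is taken from VBoxGuest.h):
 *
 *      VBGLIOCSETGUESTCAPS Req;
 *      int rc;
 *      RT_ZERO(Req);
 *      Req.Hdr.cbIn      = VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES_SIZE_IN;
 *      Req.Hdr.cbOut     = VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES_SIZE_OUT;
 *      Req.Hdr.uVersion  = VBGLREQHDR_VERSION;
 *      Req.Hdr.uType     = VBGLREQHDR_TYPE_DEFAULT;
 *      Req.u.In.fOrMask  = VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING;
 *      Req.u.In.fNotMask = 0;
 *      rc = VGDrvCommonIoCtl(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES, pDevExt, pSession, &Req.Hdr, sizeof(Req));
 *
 * On success, Req.u.Out.fSessionCaps and Req.u.Out.fGlobalCaps hold the
 * resulting per-session and global capability masks.
 */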
3969
3970/** @} */
3971
3972
3973/**
3974 * Common IOCtl for user to kernel and kernel to kernel communication.
3975 *
3976 * This function only does the basic validation and then invokes
 3977 * worker functions that take care of each specific function.
3978 *
3979 * @returns VBox status code.
3980 *
3981 * @param iFunction The requested function.
3982 * @param pDevExt The device extension.
3983 * @param pSession The client session.
3984 * @param pReqHdr Pointer to the request. This always starts with
3985 * a request common header.
3986 * @param cbReq The max size of the request buffer.
3987 */
3988int VGDrvCommonIoCtl(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLREQHDR pReqHdr, size_t cbReq)
3989{
3990 uintptr_t const iFunctionStripped = VBGL_IOCTL_CODE_STRIPPED(iFunction);
3991 int rc;
3992
3993 LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pReqHdr=%p cbReq=%zu\n",
3994 iFunction, pDevExt, pSession, pReqHdr, cbReq));
3995
3996 /*
3997 * Define some helper macros to simplify validation.
3998 */
3999#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
4000 do { \
4001 if (RT_LIKELY( pReqHdr->cbIn == (cbInExpect) \
4002 && ( pReqHdr->cbOut == (cbOutExpect) \
4003 || ((cbInExpect) == (cbOutExpect) && pReqHdr->cbOut == 0) ) )) \
4004 { /* likely */ } \
4005 else \
4006 { \
4007 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
4008 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
4009 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4010 } \
4011 } while (0)
4012
4013#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
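/* For instance, REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK) expands (via the
 * token pasting above) to:
 *
 *      REQ_CHECK_SIZES_EX(VBGL_IOCTL_CHANGE_FILTER_MASK,
 *                         VBGL_IOCTL_CHANGE_FILTER_MASK_SIZE_IN,
 *                         VBGL_IOCTL_CHANGE_FILTER_MASK_SIZE_OUT);
 */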
4014
4015#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
4016 do { \
4017 if (RT_LIKELY(pReqHdr->cbIn == (cbInExpect))) \
4018 { /* likely */ } \
4019 else \
4020 { \
4021 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
4022 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
4023 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4024 } \
4025 } while (0)
4026
4027#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
4028 do { \
4029 if (RT_LIKELY( pReqHdr->cbOut == (cbOutExpect) \
4030 || (pReqHdr->cbOut == 0 && pReqHdr->cbIn == (cbOutExpect)))) \
4031 { /* likely */ } \
4032 else \
4033 { \
4034 Log(( #Name ": Invalid input/output sizes. cbOut=%ld (%ld) expected %ld.\n", \
4035 (long)pReqHdr->cbOut, (long)pReqHdr->cbIn, (long)(cbOutExpect))); \
4036 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4037 } \
4038 } while (0)
4039
4040#define REQ_CHECK_EXPR(Name, expr) \
4041 do { \
4042 if (RT_LIKELY(!!(expr))) \
4043 { /* likely */ } \
4044 else \
4045 { \
4046 Log(( #Name ": %s\n", #expr)); \
4047 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4048 } \
4049 } while (0)
4050
4051#define REQ_CHECK_EXPR_FMT(expr, fmt) \
4052 do { \
4053 if (RT_LIKELY(!!(expr))) \
4054 { /* likely */ } \
4055 else \
4056 { \
4057 Log( fmt ); \
4058 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
4059 } \
4060 } while (0)
4061
4062#define REQ_CHECK_RING0(mnemonic) \
4063 do { \
4064 if (pSession->R0Process != NIL_RTR0PROCESS) \
4065 { \
4066 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
4067 pSession->Process, (uintptr_t)pSession->R0Process)); \
4068 return pReqHdr->rc = VERR_PERMISSION_DENIED; \
4069 } \
4070 } while (0)
4071
4072
4073 /*
4074 * Validate the request.
4075 */
4076 if (RT_LIKELY(cbReq >= sizeof(*pReqHdr)))
4077 { /* likely */ }
4078 else
4079 {
4080 Log(("VGDrvCommonIoCtl: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
4081 return VERR_INVALID_PARAMETER;
4082 }
4083
4084 if (pReqHdr->cbOut == 0)
4085 pReqHdr->cbOut = pReqHdr->cbIn;
4086
4087 if (RT_LIKELY( pReqHdr->uVersion == VBGLREQHDR_VERSION
4088 && pReqHdr->cbIn >= sizeof(*pReqHdr)
4089 && pReqHdr->cbIn <= cbReq
4090 && pReqHdr->cbOut >= sizeof(*pReqHdr)
4091 && pReqHdr->cbOut <= cbReq))
4092 { /* likely */ }
4093 else
4094 {
4095 Log(("VGDrvCommonIoCtl: Bad ioctl request header; cbIn=%#lx cbOut=%#lx version=%#lx\n",
4096 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->uVersion));
4097 return VERR_INVALID_PARAMETER;
4098 }
4099
4100 if (RT_LIKELY(RT_VALID_PTR(pSession)))
4101 { /* likely */ }
4102 else
4103 {
4104 Log(("VGDrvCommonIoCtl: Invalid pSession value %p (ioctl=%#x)\n", pSession, iFunction));
4105 return VERR_INVALID_PARAMETER;
4106 }
4107
4108
4109 /*
4110 * Deal with variably sized requests first.
4111 */
4112 rc = VINF_SUCCESS;
4113 if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST(0))
4114 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST_BIG) )
4115 {
4116 REQ_CHECK_EXPR(VBGL_IOCTL_VMMDEV_REQUEST, pReqHdr->uType != VBGLREQHDR_TYPE_DEFAULT);
4117 REQ_CHECK_EXPR_FMT(pReqHdr->cbIn == pReqHdr->cbOut,
4118 ("VBGL_IOCTL_VMMDEV_REQUEST: cbIn=%ld != cbOut=%ld\n", (long)pReqHdr->cbIn, (long)pReqHdr->cbOut));
4119 pReqHdr->rc = vgdrvIoCtl_VMMDevRequest(pDevExt, pSession, (VMMDevRequestHeader *)pReqHdr, cbReq);
4120 }
4121 else if (RT_LIKELY(pReqHdr->uType == VBGLREQHDR_TYPE_DEFAULT))
4122 {
4123 if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_LOG(0)))
4124 {
4125 REQ_CHECK_SIZE_OUT(VBGL_IOCTL_LOG, VBGL_IOCTL_LOG_SIZE_OUT);
4126 pReqHdr->rc = vgdrvIoCtl_Log(pDevExt, &((PVBGLIOCLOG)pReqHdr)->u.In.szMsg[0], pReqHdr->cbIn - sizeof(VBGLREQHDR),
4127 pSession->fUserSession);
4128 }
4129#ifdef VBOX_WITH_HGCM
4130 else if (iFunction == VBGL_IOCTL_IDC_HGCM_FAST_CALL) /* (is variable size, but we don't bother encoding it) */
4131 {
4132 REQ_CHECK_RING0("VBGL_IOCTL_IDC_HGCM_FAST_CALL");
4133 REQ_CHECK_EXPR(VBGL_IOCTL_IDC_HGCM_FAST_CALL, cbReq >= sizeof(VBGLIOCIDCHGCMFASTCALL) + sizeof(VMMDevHGCMCall));
4134 vgdrvIoCtl_HGCMFastCall(pDevExt, (VBGLIOCIDCHGCMFASTCALL volatile *)pReqHdr);
4135 }
4136 else if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL(0))
4137# if ARCH_BITS == 64
4138 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0))
4139# endif
4140 )
4141 {
4142 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
4143 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
4144 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
4145 iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_32(0)),
4146 false /*fUserData*/, cbReq);
4147 }
4148 else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA(0)))
4149 {
4150 REQ_CHECK_RING0("VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA");
4151 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
4152 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
4153 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
4154 ARCH_BITS == 32, true /*fUserData*/, cbReq);
4155 }
4156#endif /* VBOX_WITH_HGCM */
4157 else
4158 {
4159 switch (iFunction)
4160 {
4161 /*
4162 * Ring-0 only:
4163 */
4164 case VBGL_IOCTL_IDC_CONNECT:
 4165 REQ_CHECK_RING0("VBGL_IOCTL_IDC_CONNECT");
4166 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_CONNECT);
4167 pReqHdr->rc = vgdrvIoCtl_IdcConnect(pDevExt, pSession, (PVBGLIOCIDCCONNECT)pReqHdr);
4168 break;
4169
4170 case VBGL_IOCTL_IDC_DISCONNECT:
4171 REQ_CHECK_RING0("VBGL_IOCTL_IDC_DISCONNECT");
4172 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_DISCONNECT);
4173 pReqHdr->rc = vgdrvIoCtl_IdcDisconnect(pDevExt, pSession, (PVBGLIOCIDCDISCONNECT)pReqHdr);
4174 break;
4175
4176 case VBGL_IOCTL_GET_VMMDEV_IO_INFO:
4177 REQ_CHECK_RING0("GET_VMMDEV_IO_INFO");
4178 REQ_CHECK_SIZES(VBGL_IOCTL_GET_VMMDEV_IO_INFO);
4179 pReqHdr->rc = vgdrvIoCtl_GetVMMDevIoInfo(pDevExt, (PVBGLIOCGETVMMDEVIOINFO)pReqHdr);
4180 break;
4181
4182 case VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
4183 REQ_CHECK_RING0("SET_MOUSE_NOTIFY_CALLBACK");
4184 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK);
4185 pReqHdr->rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (PVBGLIOCSETMOUSENOTIFYCALLBACK)pReqHdr);
4186 break;
4187
4188 /*
4189 * Ring-3 only:
4190 */
4191 case VBGL_IOCTL_DRIVER_VERSION_INFO:
4192 REQ_CHECK_SIZES(VBGL_IOCTL_DRIVER_VERSION_INFO);
4193 pReqHdr->rc = vgdrvIoCtl_DriverVersionInfo(pDevExt, pSession, (PVBGLIOCDRIVERVERSIONINFO)pReqHdr);
4194 break;
4195
4196 /*
4197 * Both ring-3 and ring-0:
4198 */
4199 case VBGL_IOCTL_WAIT_FOR_EVENTS:
4200 REQ_CHECK_SIZES(VBGL_IOCTL_WAIT_FOR_EVENTS);
4201 pReqHdr->rc = vgdrvIoCtl_WaitForEvents(pDevExt, pSession, (VBGLIOCWAITFOREVENTS *)pReqHdr,
4202 pSession->R0Process != NIL_RTR0PROCESS);
4203 break;
4204
4205 case VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
4206 REQ_CHECK_SIZES(VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS);
4207 pReqHdr->rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
4208 break;
4209
4210 case VBGL_IOCTL_CHANGE_FILTER_MASK:
4211 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK);
4212 pReqHdr->rc = vgdrvIoCtl_ChangeFilterMask(pDevExt, pSession, (PVBGLIOCCHANGEFILTERMASK)pReqHdr);
4213 break;
4214
4215#ifdef VBOX_WITH_HGCM
4216 case VBGL_IOCTL_HGCM_CONNECT:
4217 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_CONNECT);
4218 pReqHdr->rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (PVBGLIOCHGCMCONNECT)pReqHdr);
4219 break;
4220
4221 case VBGL_IOCTL_HGCM_DISCONNECT:
4222 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_DISCONNECT);
4223 pReqHdr->rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (PVBGLIOCHGCMDISCONNECT)pReqHdr);
4224 break;
4225#endif
4226
4227 case VBGL_IOCTL_CHECK_BALLOON:
4228 REQ_CHECK_SIZES(VBGL_IOCTL_CHECK_BALLOON);
4229 pReqHdr->rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHECKBALLOON)pReqHdr);
4230 break;
4231
4232 case VBGL_IOCTL_CHANGE_BALLOON:
4233 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_BALLOON);
4234 pReqHdr->rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHANGEBALLOON)pReqHdr);
4235 break;
4236
4237 case VBGL_IOCTL_WRITE_CORE_DUMP:
4238 REQ_CHECK_SIZES(VBGL_IOCTL_WRITE_CORE_DUMP);
4239 pReqHdr->rc = vgdrvIoCtl_WriteCoreDump(pDevExt, pSession, (PVBGLIOCWRITECOREDUMP)pReqHdr);
4240 break;
4241
4242 case VBGL_IOCTL_SET_MOUSE_STATUS:
4243 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_STATUS);
4244 pReqHdr->rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, ((PVBGLIOCSETMOUSESTATUS)pReqHdr)->u.In.fStatus);
4245 break;
4246
4247 case VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
4248 REQ_CHECK_SIZES(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES);
4249 pReqHdr->rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (PVBGLIOCACQUIREGUESTCAPS)pReqHdr);
4250 break;
4251
4252 case VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES:
4253 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES);
4254 pReqHdr->rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (PVBGLIOCSETGUESTCAPS)pReqHdr);
4255 break;
4256
4257#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
4258 case VBGL_IOCTL_DPC_LATENCY_CHECKER:
4259 REQ_CHECK_SIZES(VBGL_IOCTL_DPC_LATENCY_CHECKER);
4260 pReqHdr->rc = VGDrvNtIOCtl_DpcLatencyChecker();
4261 break;
4262#endif
4263
4264 default:
4265 {
4266 LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x (stripped %#x) cbReq=%#x\n",
4267 iFunction, iFunctionStripped, cbReq));
4268 pReqHdr->rc = rc = VERR_NOT_SUPPORTED;
4269 break;
4270 }
4271 }
4272 }
4273 }
4274 else
4275 {
4276 Log(("VGDrvCommonIoCtl: uType=%#x, expected default (ioctl=%#x)\n", pReqHdr->uType, iFunction));
4277 return VERR_INVALID_PARAMETER;
4278 }
4279
4280 LogFlow(("VGDrvCommonIoCtl: returns %Rrc (req: rc=%Rrc cbOut=%#x)\n", rc, pReqHdr->rc, pReqHdr->cbOut));
4281 return rc;
4282}
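
/*
 * Minimal sketch of how a platform specific ioctl entry point could forward to
 * VGDrvCommonIoCtl (hypothetical helper; the real native wrappers differ per
 * OS and also take care of copying the user buffer in and out):
 *
 *      static int nativeIoCtlWorker(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt,
 *                                   PVBOXGUESTSESSION pSession, void *pvBuf, size_t cbBuf)
 *      {
 *          PVBGLREQHDR pHdr = (PVBGLREQHDR)pvBuf;
 *          if (cbBuf < sizeof(*pHdr))
 *              return VERR_INVALID_PARAMETER;
 *          return VGDrvCommonIoCtl(iFunction, pDevExt, pSession, pHdr, cbBuf);
 *      }
 */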
4283
4284
4285/**
4286 * Used by VGDrvCommonISR as well as the acquire guest capability code.
4287 *
4288 * @returns VINF_SUCCESS on success. On failure, ORed together
4289 * RTSemEventMultiSignal errors (completes processing despite errors).
4290 * @param pDevExt The VBoxGuest device extension.
4291 * @param fEvents The events to dispatch.
4292 */
4293static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
4294{
4295 PVBOXGUESTWAIT pWait;
4296 PVBOXGUESTWAIT pSafe;
4297 int rc = VINF_SUCCESS;
4298
4299 fEvents |= pDevExt->f32PendingEvents;
4300
4301 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
4302 {
4303 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
4304 if ( fHandledEvents != 0
4305 && !pWait->fResEvents)
4306 {
4307 /* Does this one wait on any of the events we're dispatching? We do a quick
4308 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
4309 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
4310 fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
4311 if (fHandledEvents)
4312 {
4313 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
4314 fEvents &= ~pWait->fResEvents;
4315 RTListNodeRemove(&pWait->ListNode);
4316#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
4317 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
4318#else
4319 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
4320 rc |= RTSemEventMultiSignal(pWait->Event);
4321#endif
4322 if (!fEvents)
4323 break;
4324 }
4325 }
4326 }
4327
4328 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
4329 return rc;
4330}
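
/*
 * Worked example for the loop above (no acquire-mode capabilities involved):
 *
 *      fEvents (pending + dispatched)    = 0x5
 *      pWait->fReqEvents                 = 0x3
 *      fHandledEvents     = 0x3 & 0x5    = 0x1
 *      pWait->fResEvents                 = 0x1   (the waiter is woken with this)
 *      fEvents           &= ~0x1         = 0x4   (written back to f32PendingEvents)
 */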
4331
4332
4333/**
4334 * Simply checks whether the IRQ is ours or not, does not do any interrupt
 4335 * processing.
4336 *
4337 * @returns true if it was our interrupt, false if it wasn't.
4338 * @param pDevExt The VBoxGuest device extension.
4339 */
4340bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
4341{
4342 VMMDevMemory volatile *pVMMDevMemory;
4343 bool fOurIrq;
4344
4345 RTSpinlockAcquire(pDevExt->EventSpinlock);
4346 pVMMDevMemory = pDevExt->pVMMDevMemory;
4347 fOurIrq = pVMMDevMemory ? pVMMDevMemory->V.V1_04.fHaveEvents : false;
4348 RTSpinlockRelease(pDevExt->EventSpinlock);
4349
4350 return fOurIrq;
4351}
4352
4353
4354/**
4355 * Common interrupt service routine.
4356 *
 4357 * This deals with events and with waking up threads waiting for those events.
4358 *
4359 * @returns true if it was our interrupt, false if it wasn't.
4360 * @param pDevExt The VBoxGuest device extension.
4361 */
4362bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
4363{
4364 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
4365 bool fMousePositionChanged = false;
4366 int rc = 0;
4367 VMMDevMemory volatile *pVMMDevMemory;
4368 bool fOurIrq;
4369
4370 /*
4371 * Make sure we've initialized the device extension.
4372 */
4373 if (RT_UNLIKELY(!pReq))
4374 return false;
4375
4376 /*
4377 * Enter the spinlock and check if it's our IRQ or not.
4378 */
4379 RTSpinlockAcquire(pDevExt->EventSpinlock);
4380 pVMMDevMemory = pDevExt->pVMMDevMemory;
4381 fOurIrq = pVMMDevMemory ? pVMMDevMemory->V.V1_04.fHaveEvents : false;
4382 if (fOurIrq)
4383 {
4384 /*
 4385 * Acknowledge events.
 4386 * We don't use VbglR0GRPerform here as it may take additional spinlocks.
4387 */
4388 pReq->header.rc = VERR_INTERNAL_ERROR;
4389 pReq->events = 0;
4390 ASMCompilerBarrier();
4391 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
4392 ASMCompilerBarrier(); /* paranoia */
4393 if (RT_SUCCESS(pReq->header.rc))
4394 {
4395 uint32_t fEvents = pReq->events;
4396
4397 Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
4398
4399 /*
4400 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
4401 */
4402 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
4403 {
4404 fMousePositionChanged = true;
4405 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
4406#if !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
4407 if (pDevExt->pfnMouseNotifyCallback)
4408 pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
4409#endif
4410 }
4411
4412#ifdef VBOX_WITH_HGCM
4413 /*
 4414 * The HGCM event/wait list is kind of different in that we evaluate all entries.
4415 */
4416 if (fEvents & VMMDEV_EVENT_HGCM)
4417 {
4418 PVBOXGUESTWAIT pWait;
4419 PVBOXGUESTWAIT pSafe;
4420 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
4421 {
4422 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
4423 {
4424 pWait->fResEvents = VMMDEV_EVENT_HGCM;
4425 RTListNodeRemove(&pWait->ListNode);
4426# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
4427 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
4428# else
4429 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
4430 rc |= RTSemEventMultiSignal(pWait->Event);
4431# endif
4432 }
4433 }
4434 fEvents &= ~VMMDEV_EVENT_HGCM;
4435 }
4436#endif
4437
4438 /*
4439 * Normal FIFO waiter evaluation.
4440 */
4441 rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
4442 }
 4443 else /* something is seriously wrong... */
4444 Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
4445 pReq->header.rc, pReq->events));
4446 }
4447 else
4448 Log3(("VGDrvCommonISR: not ours\n"));
4449
4450 RTSpinlockRelease(pDevExt->EventSpinlock);
4451
4452 /*
4453 * Execute the mouse notification callback here if it cannot be executed while
4454 * holding the interrupt safe spinlock, see @bugref{8639}.
4455 */
4456#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT) && !defined(RT_OS_WINDOWS) /* (Windows does this in the Dpc callback) */
4457 if ( fMousePositionChanged
4458 && pDevExt->pfnMouseNotifyCallback)
4459 pDevExt->pfnMouseNotifyCallback(pDevExt->pvMouseNotifyCallbackArg);
4460#endif
4461
4462#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
4463 /*
4464 * Do wake-ups.
4465 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
4466 * care of it. Same on darwin, doing it in the work loop callback.
4467 */
4468 VGDrvCommonWaitDoWakeUps(pDevExt);
4469#endif
4470
4471 /*
 4472 * Work the poll and async notification queues on OSes that implement that.
4473 * (Do this outside the spinlock to prevent some recursive spinlocking.)
4474 */
4475 if (fMousePositionChanged)
4476 {
4477 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
4478 VGDrvNativeISRMousePollEvent(pDevExt);
4479 }
4480
4481 Assert(rc == 0);
4482 NOREF(rc);
4483 return fOurIrq;
4484}
4485