VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 68558

Last change on this file since 68558 was 68558, checked in by vboxsync, 7 years ago

merging vbglioc r117710: VBoxGuest.cpp: logging fixes

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 143.3 KB
Line 
1/* $Id: VBoxGuest.cpp 68558 2017-08-31 12:10:05Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2016 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27/** @page pg_vbdrv VBoxGuest
28 *
29 * VBoxGuest is the device driver for VMMDev.
30 *
31 * The device driver is shipped as part of the guest additions. It has roots in
32 * the host VMM support driver (usually known as VBoxDrv), so fixes in platform
33 * specific code may apply to both drivers.
34 *
35 * The common code lives in VBoxGuest.cpp and is compiled both as C++ and C.
36 * The VBoxGuest.cpp source file shall not contain platform specific code,
 * though it must occasionally do a few \#ifdef RT_OS_XXX tests to cater for
38 * platform differences. Though, in those cases, it is common that more than
39 * one platform needs special handling.
40 *
41 * On most platforms the device driver should create two device nodes, one for
42 * full (unrestricted) access to the feature set, and one which only provides a
 * restricted set of functions. These are generally referred to as 'vboxguest'
 * and 'vboxuser' respectively. Currently, this two-device approach is only
45 * implemented on Linux!
46 *
47 */
48
49
50/*********************************************************************************************************************************
51* Header Files *
52*********************************************************************************************************************************/
53#define LOG_GROUP LOG_GROUP_DEFAULT
54#include "VBoxGuestInternal.h"
55#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
56#include <VBox/log.h>
57#include <iprt/mem.h>
58#include <iprt/time.h>
59#include <iprt/memobj.h>
60#include <iprt/asm.h>
61#include <iprt/asm-amd64-x86.h>
62#include <iprt/string.h>
63#include <iprt/process.h>
64#include <iprt/assert.h>
65#include <iprt/param.h>
66#include <iprt/timer.h>
67#ifdef VBOX_WITH_HGCM
68# include <iprt/thread.h>
69#endif
70#include "version-generated.h"
71#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
72# include "revision-generated.h"
73#endif
74#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
75# include <iprt/rand.h>
76#endif
77
78
79/*********************************************************************************************************************************
80* Defined Constants And Macros *
81*********************************************************************************************************************************/
82#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
88#ifdef VBOX_WITH_HGCM
89static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
90#endif
91static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
92static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker);
93static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
94static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents);
95static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt);
96static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt);
97static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
98 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
99static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
100 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination);
101static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
102 uint32_t fOrMask, uint32_t fNoMask,
103 uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps, bool fSessionTermination);
104static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
105 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags, bool fSessionTermination);
106static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents);
107
108
109/*********************************************************************************************************************************
110* Global Variables *
111*********************************************************************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the full array of chunk
 *  page addresses; used both for the allocation and as the header size. */
static const uint32_t g_cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
135
136
137/**
138 * Reserves memory in which the VMM can relocate any guest mappings
139 * that are floating around.
140 *
141 * This operation is a little bit tricky since the VMM might not accept
142 * just any address because of address clashes between the three contexts
143 * it operates in, so use a small stack to perform this operation.
144 *
145 * @returns VBox status code (ignored).
146 * @param pDevExt The device extension.
147 */
148static int vgdrvInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
149{
150 /*
151 * Query the required space.
152 */
153 VMMDevReqHypervisorInfo *pReq;
154 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
155 if (RT_FAILURE(rc))
156 return rc;
157 pReq->hypervisorStart = 0;
158 pReq->hypervisorSize = 0;
159 rc = VbglGRPerform(&pReq->header);
160 if (RT_FAILURE(rc)) /* this shouldn't happen! */
161 {
162 VbglGRFree(&pReq->header);
163 return rc;
164 }
165
166 /*
167 * The VMM will report back if there is nothing it wants to map, like for
168 * instance in VT-x and AMD-V mode.
169 */
170 if (pReq->hypervisorSize == 0)
171 Log(("vgdrvInitFixateGuestMappings: nothing to do\n"));
172 else
173 {
174 /*
175 * We have to try several times since the host can be picky
176 * about certain addresses.
177 */
178 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
179 uint32_t cbHypervisor = pReq->hypervisorSize;
180 RTR0MEMOBJ ahTries[5];
181 uint32_t iTry;
182 bool fBitched = false;
183 Log(("vgdrvInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
184 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
185 {
186 /*
187 * Reserve space, or if that isn't supported, create a object for
188 * some fictive physical memory and map that in to kernel space.
189 *
190 * To make the code a bit uglier, most systems cannot help with
191 * 4MB alignment, so we have to deal with that in addition to
192 * having two ways of getting the memory.
193 */
194 uint32_t uAlignment = _4M;
195 RTR0MEMOBJ hObj;
196 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
197 if (rc == VERR_NOT_SUPPORTED)
198 {
199 uAlignment = PAGE_SIZE;
200 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
201 }
202 /*
203 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
204 * not implemented at all at the current platform, try to map the memory object into the
205 * virtual kernel space.
206 */
207 if (rc == VERR_NOT_SUPPORTED)
208 {
209 if (hFictive == NIL_RTR0MEMOBJ)
210 {
211 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
212 if (RT_FAILURE(rc))
213 break;
214 hFictive = hObj;
215 }
216 uAlignment = _4M;
217 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
218 if (rc == VERR_NOT_SUPPORTED)
219 {
220 uAlignment = PAGE_SIZE;
221 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
222 }
223 }
224 if (RT_FAILURE(rc))
225 {
226 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
227 rc, cbHypervisor, uAlignment, iTry));
228 fBitched = true;
229 break;
230 }
231
232 /*
233 * Try set it.
234 */
235 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
236 pReq->header.rc = VERR_INTERNAL_ERROR;
237 pReq->hypervisorSize = cbHypervisor;
238 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
239 if ( uAlignment == PAGE_SIZE
240 && pReq->hypervisorStart & (_4M - 1))
241 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
242 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
243
244 rc = VbglGRPerform(&pReq->header);
245 if (RT_SUCCESS(rc))
246 {
247 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
248 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
249 RTR0MemObjAddress(pDevExt->hGuestMappings),
250 RTR0MemObjSize(pDevExt->hGuestMappings),
251 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
252 break;
253 }
254 ahTries[iTry] = hObj;
255 }
256
257 /*
258 * Cleanup failed attempts.
259 */
260 while (iTry-- > 0)
261 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
262 if ( RT_FAILURE(rc)
263 && hFictive != NIL_RTR0PTR)
264 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
265 if (RT_FAILURE(rc) && !fBitched)
266 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
267 }
268 VbglGRFree(&pReq->header);
269
270 /*
271 * We ignore failed attempts for now.
272 */
273 return VINF_SUCCESS;
274}
275
276
277/**
278 * Undo what vgdrvInitFixateGuestMappings did.
279 *
280 * @param pDevExt The device extension.
281 */
282static void vgdrvTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
283{
284 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
285 {
286 /*
287 * Tell the host that we're going to free the memory we reserved for
288 * it, the free it up. (Leak the memory if anything goes wrong here.)
289 */
290 VMMDevReqHypervisorInfo *pReq;
291 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
292 if (RT_SUCCESS(rc))
293 {
294 pReq->hypervisorStart = 0;
295 pReq->hypervisorSize = 0;
296 rc = VbglGRPerform(&pReq->header);
297 VbglGRFree(&pReq->header);
298 }
299 if (RT_SUCCESS(rc))
300 {
301 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
302 AssertRC(rc);
303 }
304 else
305 LogRel(("vgdrvTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
306
307 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
308 }
309}
310
311
312
313/**
314 * Report the guest information to the host.
315 *
316 * @returns IPRT status code.
317 * @param enmOSType The OS type to report.
318 */
319static int vgdrvReportGuestInfo(VBOXOSTYPE enmOSType)
320{
321 /*
322 * Allocate and fill in the two guest info reports.
323 */
324 VMMDevReportGuestInfo2 *pReqInfo2 = NULL;
325 VMMDevReportGuestInfo *pReqInfo1 = NULL;
326 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo2, sizeof (VMMDevReportGuestInfo2), VMMDevReq_ReportGuestInfo2);
327 Log(("vgdrvReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
328 if (RT_SUCCESS(rc))
329 {
330 pReqInfo2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
331 pReqInfo2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
332 pReqInfo2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
333 pReqInfo2->guestInfo.additionsRevision = VBOX_SVN_REV;
334 pReqInfo2->guestInfo.additionsFeatures = 0; /* (no features defined yet) */
335 RTStrCopy(pReqInfo2->guestInfo.szName, sizeof(pReqInfo2->guestInfo.szName), VBOX_VERSION_STRING);
336
337 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReqInfo1, sizeof (VMMDevReportGuestInfo), VMMDevReq_ReportGuestInfo);
338 Log(("vgdrvReportGuestInfo: VbglGRAlloc VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
339 if (RT_SUCCESS(rc))
340 {
341 pReqInfo1->guestInfo.interfaceVersion = VMMDEV_VERSION;
342 pReqInfo1->guestInfo.osType = enmOSType;
343
344 /*
345 * There are two protocols here:
346 * 1. Info2 + Info1. Supported by >=3.2.51.
347 * 2. Info1 and optionally Info2. The old protocol.
348 *
349 * We try protocol 1 first. It will fail with VERR_NOT_SUPPORTED
350 * if not supported by the VMMDev (message ordering requirement).
351 */
352 rc = VbglGRPerform(&pReqInfo2->header);
353 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
354 if (RT_SUCCESS(rc))
355 {
356 rc = VbglGRPerform(&pReqInfo1->header);
357 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
358 }
359 else if ( rc == VERR_NOT_SUPPORTED
360 || rc == VERR_NOT_IMPLEMENTED)
361 {
362 rc = VbglGRPerform(&pReqInfo1->header);
363 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo completed with rc=%Rrc\n", rc));
364 if (RT_SUCCESS(rc))
365 {
366 rc = VbglGRPerform(&pReqInfo2->header);
367 Log(("vgdrvReportGuestInfo: VbglGRPerform VMMDevReportGuestInfo2 completed with rc=%Rrc\n", rc));
368 if (rc == VERR_NOT_IMPLEMENTED)
369 rc = VINF_SUCCESS;
370 }
371 }
372 VbglGRFree(&pReqInfo1->header);
373 }
374 VbglGRFree(&pReqInfo2->header);
375 }
376
377 return rc;
378}
379
380
381/**
382 * Report the guest driver status to the host.
383 *
384 * @returns IPRT status code.
385 * @param fActive Flag whether the driver is now active or not.
386 */
387static int vgdrvReportDriverStatus(bool fActive)
388{
389 /*
390 * Report guest status of the VBox driver to the host.
391 */
392 VMMDevReportGuestStatus *pReq2 = NULL;
393 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestStatus);
394 Log(("vgdrvReportDriverStatus: VbglGRAlloc VMMDevReportGuestStatus completed with rc=%Rrc\n", rc));
395 if (RT_SUCCESS(rc))
396 {
397 pReq2->guestStatus.facility = VBoxGuestFacilityType_VBoxGuestDriver;
398 pReq2->guestStatus.status = fActive ?
399 VBoxGuestFacilityStatus_Active
400 : VBoxGuestFacilityStatus_Inactive;
401 pReq2->guestStatus.flags = 0;
402 rc = VbglGRPerform(&pReq2->header);
403 Log(("vgdrvReportDriverStatus: VbglGRPerform VMMDevReportGuestStatus completed with fActive=%d, rc=%Rrc\n",
404 fActive ? 1 : 0, rc));
405 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
406 rc = VINF_SUCCESS;
407 VbglGRFree(&pReq2->header);
408 }
409
410 return rc;
411}
412
413
414/** @name Memory Ballooning
415 * @{
416 */
417
418/**
419 * Inflate the balloon by one chunk represented by an R0 memory object.
420 *
421 * The caller owns the balloon mutex.
422 *
423 * @returns IPRT status code.
424 * @param pMemObj Pointer to the R0 memory object.
425 * @param pReq The pre-allocated request for performing the VMMDev call.
426 */
427static int vgdrvBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
428{
429 uint32_t iPage;
430 int rc;
431
432 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
433 {
434 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
435 pReq->aPhysPage[iPage] = phys;
436 }
437
438 pReq->fInflate = true;
439 pReq->header.size = g_cbChangeMemBalloonReq;
440 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
441
442 rc = VbglGRPerform(&pReq->header);
443 if (RT_FAILURE(rc))
444 LogRel(("vgdrvBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
445 return rc;
446}
447
448
449/**
450 * Deflate the balloon by one chunk - info the host and free the memory object.
451 *
452 * The caller owns the balloon mutex.
453 *
454 * @returns IPRT status code.
455 * @param pMemObj Pointer to the R0 memory object.
456 * The memory object will be freed afterwards.
457 * @param pReq The pre-allocated request for performing the VMMDev call.
458 */
459static int vgdrvBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
460{
461 uint32_t iPage;
462 int rc;
463
464 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
465 {
466 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
467 pReq->aPhysPage[iPage] = phys;
468 }
469
470 pReq->fInflate = false;
471 pReq->header.size = g_cbChangeMemBalloonReq;
472 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
473
474 rc = VbglGRPerform(&pReq->header);
475 if (RT_FAILURE(rc))
476 {
477 LogRel(("vgdrvBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
478 return rc;
479 }
480
481 rc = RTR0MemObjFree(*pMemObj, true);
482 if (RT_FAILURE(rc))
483 {
484 LogRel(("vgdrvBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
485 return rc;
486 }
487
488 *pMemObj = NIL_RTR0MEMOBJ;
489 return VINF_SUCCESS;
490}
491
492
/**
 * Inflate/deflate the memory balloon and notify the host.
 *
 * This is a worker used by vgdrvIoCtl_CheckMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   cBalloonChunks  The new size of the balloon in chunks of 1MB.
 * @param   pfHandleInR3    Where to return the handle-in-ring3 indicator
 *                          (VINF_SUCCESS if set).
 */
static int vgdrvSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, bool *pfHandleInR3)
{
    int rc = VINF_SUCCESS;

    if (pDevExt->MemBalloon.fUseKernelAPI)
    {
        VMMDevChangeMemBalloon *pReq;
        uint32_t i;

        if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
        {
            LogRel(("vgdrvSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
                    cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* NOTE(review): this "nothing to do" check compares the request against
         * cMaxChunks rather than the current cChunks, which looks suspicious --
         * confirm the intent before changing it. */
        if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
            return VINF_SUCCESS;   /* nothing to do */

        /* Lazily allocate the per-chunk R0 memory object array (zero-initialized,
         * so unused slots read as NIL). */
        if (   cBalloonChunks > pDevExt->MemBalloon.cChunks
            && !pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeKernel: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
        }

        /* One request is reused for every inflate/deflate call below. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
        if (RT_FAILURE(rc))
            return rc;

        if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
        {
            /* inflate */
            for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
            {
                rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
                                           VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
                if (RT_FAILURE(rc))
                {
                    if (rc == VERR_NOT_SUPPORTED)
                    {
                        /* not supported -- fall back to the R3-allocated memory. */
                        rc = VINF_SUCCESS;
                        pDevExt->MemBalloon.fUseKernelAPI = false;
                        Assert(pDevExt->MemBalloon.cChunks == 0);
                        Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
                    }
                    /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
                     * cannot allocate more memory => don't try further, just stop here */
                    /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
                    break;
                }

                rc = vgdrvBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                    RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    break;
                }
                pDevExt->MemBalloon.cChunks++;
            }
        }
        else
        {
            /* deflate -- release chunks from the top down so cChunks stays accurate on failure */
            for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
            {
                rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
                    break;
                }
                pDevExt->MemBalloon.cChunks--;
            }
        }

        VbglGRFree(&pReq->header);
    }

    /*
     * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
     * the balloon changes via the other API.
     */
    *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;

    return rc;
}
598
599
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for vgdrvIoCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   pvChunk     The address of the chunk to add to / remove from the
 *                      balloon. (user space address)
 * @param   fInflate    Inflate if true, deflate if false.
 */
static int vgdrvSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, RTR3PTR pvChunk, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    PRTR0MEMOBJ pMemObj = NULL;
    int rc = VINF_SUCCESS;
    uint32_t i;
    RT_NOREF1(pSession);

    if (fInflate)
    {
        /* Reject inflation past the host-reported maximum. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vgdrvSetBalloonSizeFromUser: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily create the chunk tracking array, all slots NIL. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("vgdrvSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vgdrvSetBalloonSizeFromUser: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflate this also picks the first free slot; for deflate it locates
     * the slot matching the user chunk address.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == pvChunk)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the user pages so the host can safely take them. */
        rc = RTR0MemObjLockUser(pMemObj, pvChunk, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vgdrvBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vgdrvSetBalloonSizeFromUser(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vgdrvBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vgdrvSetBalloonSizeFromUser(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
716
717
718/**
719 * Cleanup the memory balloon of a session.
720 *
721 * Will request the balloon mutex, so it must be valid and the caller must not
722 * own it already.
723 *
724 * @param pDevExt The device extension.
725 * @param pSession The session. Can be NULL at unload.
726 */
727static void vgdrvCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
728{
729 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
730 if ( pDevExt->MemBalloon.pOwner == pSession
731 || pSession == NULL /*unload*/)
732 {
733 if (pDevExt->MemBalloon.paMemObj)
734 {
735 VMMDevChangeMemBalloon *pReq;
736 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, g_cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
737 if (RT_SUCCESS(rc))
738 {
739 uint32_t i;
740 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
741 {
742 rc = vgdrvBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
743 if (RT_FAILURE(rc))
744 {
745 LogRel(("vgdrvCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
746 rc, pDevExt->MemBalloon.cChunks));
747 break;
748 }
749 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
750 pDevExt->MemBalloon.cChunks--;
751 }
752 VbglGRFree(&pReq->header);
753 }
754 else
755 LogRel(("vgdrvCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
756 rc, pDevExt->MemBalloon.cChunks));
757 RTMemFree(pDevExt->MemBalloon.paMemObj);
758 pDevExt->MemBalloon.paMemObj = NULL;
759 }
760
761 pDevExt->MemBalloon.pOwner = NULL;
762 }
763 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
764}
765
766/** @} */
767
768
769
770/** @name Heartbeat
771 * @{
772 */
773
774/**
775 * Sends heartbeat to host.
776 *
777 * @returns VBox status code.
778 */
779static int vgdrvHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
780{
781 int rc;
782 if (pDevExt->pReqGuestHeartbeat)
783 {
784 rc = VbglGRPerform(pDevExt->pReqGuestHeartbeat);
785 Log3(("vgdrvHeartbeatSend: VbglGRPerform vgdrvHeartbeatSend completed with rc=%Rrc\n", rc));
786 }
787 else
788 rc = VERR_INVALID_STATE;
789 return rc;
790}
791
792
793/**
794 * Callback for heartbeat timer.
795 */
796static DECLCALLBACK(void) vgdrvHeartbeatTimerHandler(PRTTIMER hTimer, void *pvUser, uint64_t iTick)
797{
798 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
799 int rc;
800 AssertReturnVoid(pDevExt);
801
802 rc = vgdrvHeartbeatSend(pDevExt);
803 if (RT_FAILURE(rc))
804 Log(("HB Timer: vgdrvHeartbeatSend failed: rc=%Rrc\n", rc));
805
806 NOREF(hTimer); NOREF(iTick);
807}
808
809
810/**
811 * Configure the host to check guest's heartbeat
812 * and get heartbeat interval from the host.
813 *
814 * @returns VBox status code.
815 * @param pDevExt The device extension.
816 * @param fEnabled Set true to enable guest heartbeat checks on host.
817 */
818static int vgdrvHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
819{
820 VMMDevReqHeartbeat *pReq;
821 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
822 Log(("vgdrvHeartbeatHostConfigure: VbglGRAlloc vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
823 if (RT_SUCCESS(rc))
824 {
825 pReq->fEnabled = fEnabled;
826 pReq->cNsInterval = 0;
827 rc = VbglGRPerform(&pReq->header);
828 Log(("vgdrvHeartbeatHostConfigure: VbglGRPerform vgdrvHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
829 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
830 VbglGRFree(&pReq->header);
831 }
832 return rc;
833}
834
835
/**
 * Initializes the heartbeat timer.
 *
 * This feature may be disabled by the host.
 *
 * @returns VBox status (ignored).
 * @param   pDevExt     The device extension.
 */
static int vgdrvHeartbeatInit(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Make sure that heartbeat checking is disabled.
     */
    int rc = vgdrvHeartbeatHostConfigure(pDevExt, false);
    if (RT_SUCCESS(rc))
    {
        /* Enable checking; this also fetches cNsHeartbeatInterval from the host. */
        rc = vgdrvHeartbeatHostConfigure(pDevExt, true);
        if (RT_SUCCESS(rc))
        {
            /*
             * Preallocate the request to use it from the timer callback because:
             *    1) on Windows VbglGRAlloc must be called at IRQL <= APC_LEVEL
             *       and the timer callback runs at DISPATCH_LEVEL;
             *    2) avoid repeated allocations.
             */
            rc = VbglGRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
            if (RT_SUCCESS(rc))
            {
                LogRel(("vgdrvHeartbeatInit: Setting up heartbeat to trigger every %RU64 milliseconds\n",
                        pDevExt->cNsHeartbeatInterval / RT_NS_1MS));
                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval, 0 /*fFlags*/,
                                     (PFNRTTIMER)vgdrvHeartbeatTimerHandler, pDevExt);
                if (RT_SUCCESS(rc))
                {
                    rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                    if (RT_SUCCESS(rc))
                        return VINF_SUCCESS;

                    LogRel(("vgdrvHeartbeatInit: Heartbeat timer failed to start, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("vgdrvHeartbeatInit: Failed to create heartbeat timer: %Rrc\n", rc));

                /* Undo the preallocation on any timer failure. */
                VbglGRFree(pDevExt->pReqGuestHeartbeat);
                pDevExt->pReqGuestHeartbeat = NULL;
            }
            else
                LogRel(("vgdrvHeartbeatInit: VbglGRAlloc(VMMDevReq_GuestHeartbeat): %Rrc\n", rc));

            /* Any failure path ends here: stop the host from expecting heartbeats. */
            LogRel(("vgdrvHeartbeatInit: Failed to set up the timer, guest heartbeat is disabled\n"));
            vgdrvHeartbeatHostConfigure(pDevExt, false);
        }
        else
            LogRel(("vgdrvHeartbeatInit: Failed to configure host for heartbeat checking: rc=%Rrc\n", rc));
    }
    return rc;
}
893
894/** @} */
895
896
897/**
898 * Helper to reinit the VMMDev communication after hibernation.
899 *
900 * @returns VBox status code.
901 * @param pDevExt The device extension.
902 * @param enmOSType The OS type.
903 *
904 * @todo Call this on all platforms, not just windows.
905 */
906int VGDrvCommonReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
907{
908 int rc = vgdrvReportGuestInfo(enmOSType);
909 if (RT_SUCCESS(rc))
910 {
911 rc = vgdrvReportDriverStatus(true /* Driver is active */);
912 if (RT_FAILURE(rc))
913 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
914 }
915 else
916 Log(("VGDrvCommonReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
917 LogFlow(("VGDrvCommonReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
918 RT_NOREF1(pDevExt);
919 return rc;
920}
921
922
923/**
924 * Initializes the VBoxGuest device extension when the
925 * device driver is loaded.
926 *
927 * The native code locates the VMMDev on the PCI bus and retrieve
928 * the MMIO and I/O port ranges, this function will take care of
929 * mapping the MMIO memory (if present). Upon successful return
930 * the native code should set up the interrupt handler.
931 *
932 * @returns VBox status code.
933 *
934 * @param pDevExt The device extension. Allocated by the native code.
935 * @param IOPortBase The base of the I/O port range.
936 * @param pvMMIOBase The base of the MMIO memory mapping.
937 * This is optional, pass NULL if not present.
938 * @param cbMMIO The size of the MMIO memory mapping.
939 * This is optional, pass 0 if not present.
940 * @param enmOSType The guest OS type to report to the VMMDev.
941 * @param fFixedEvents Events that will be enabled upon init and no client
942 * will ever be allowed to mask.
943 */
944int VGDrvCommonInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
945 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
946{
947 int rc, rc2;
948
949#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
950 /*
951 * Create the release log.
952 */
953 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
954 PRTLOGGER pRelLogger;
955 rc = RTLogCreate(&pRelLogger, 0 /*fFlags*/, "all", "VBOXGUEST_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
956 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
957 if (RT_SUCCESS(rc))
958 RTLogRelSetDefaultInstance(pRelLogger);
959 /** @todo Add native hook for getting logger config parameters and setting
960 * them. On linux we should use the module parameter stuff... */
961#endif
962
963 /*
964 * Adjust fFixedEvents.
965 */
966#ifdef VBOX_WITH_HGCM
967 fFixedEvents |= VMMDEV_EVENT_HGCM;
968#endif
969
970 /*
971 * Initialize the data.
972 */
973 pDevExt->IOPortBase = IOPortBase;
974 pDevExt->pVMMDevMemory = NULL;
975 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
976 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
977 pDevExt->pIrqAckEvents = NULL;
978 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
979 RTListInit(&pDevExt->WaitList);
980#ifdef VBOX_WITH_HGCM
981 RTListInit(&pDevExt->HGCMWaitList);
982#endif
983#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
984 RTListInit(&pDevExt->WakeUpList);
985#endif
986 RTListInit(&pDevExt->WokenUpList);
987 RTListInit(&pDevExt->FreeList);
988 RTListInit(&pDevExt->SessionList);
989 pDevExt->cSessions = 0;
990 pDevExt->fLoggingEnabled = false;
991 pDevExt->f32PendingEvents = 0;
992 pDevExt->u32MousePosChangedSeq = 0;
993 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
994 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
995 pDevExt->MemBalloon.cChunks = 0;
996 pDevExt->MemBalloon.cMaxChunks = 0;
997 pDevExt->MemBalloon.fUseKernelAPI = true;
998 pDevExt->MemBalloon.paMemObj = NULL;
999 pDevExt->MemBalloon.pOwner = NULL;
1000 pDevExt->pfnMouseNotifyCallback = NULL;
1001 pDevExt->pvMouseNotifyCallbackArg = NULL;
1002 pDevExt->pReqGuestHeartbeat = NULL;
1003
1004 pDevExt->fFixedEvents = fFixedEvents;
1005 vgdrvBitUsageTrackerClear(&pDevExt->EventFilterTracker);
1006 pDevExt->fEventFilterHost = UINT32_MAX; /* forces a report */
1007
1008 vgdrvBitUsageTrackerClear(&pDevExt->MouseStatusTracker);
1009 pDevExt->fMouseStatusHost = UINT32_MAX; /* forces a report */
1010
1011 pDevExt->fAcquireModeGuestCaps = 0;
1012 pDevExt->fSetModeGuestCaps = 0;
1013 pDevExt->fAcquiredGuestCaps = 0;
1014 vgdrvBitUsageTrackerClear(&pDevExt->SetGuestCapsTracker);
1015 pDevExt->fGuestCapsHost = UINT32_MAX; /* forces a report */
1016
1017 /*
1018 * If there is an MMIO region validate the version and size.
1019 */
1020 if (pvMMIOBase)
1021 {
1022 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
1023 Assert(cbMMIO);
1024 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
1025 && pVMMDev->u32Size >= 32
1026 && pVMMDev->u32Size <= cbMMIO)
1027 {
1028 pDevExt->pVMMDevMemory = pVMMDev;
1029 Log(("VGDrvCommonInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
1030 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
1031 }
1032 else /* try live without it. */
1033 LogRel(("VGDrvCommonInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
1034 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
1035 }
1036
1037 /*
1038 * Create the wait and session spinlocks as well as the ballooning mutex.
1039 */
1040 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
1041 if (RT_SUCCESS(rc))
1042 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
1043 if (RT_FAILURE(rc))
1044 {
1045 LogRel(("VGDrvCommonInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
1046 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
1047 RTSpinlockDestroy(pDevExt->EventSpinlock);
1048 return rc;
1049 }
1050
1051 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
1052 if (RT_FAILURE(rc))
1053 {
1054 LogRel(("VGDrvCommonInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
1055 RTSpinlockDestroy(pDevExt->SessionSpinlock);
1056 RTSpinlockDestroy(pDevExt->EventSpinlock);
1057 return rc;
1058 }
1059
1060 /*
1061 * Initialize the guest library and report the guest info back to VMMDev,
1062 * set the interrupt control filter mask, and fixate the guest mappings
1063 * made by the VMM.
1064 */
1065 rc = VbglInitPrimary(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
1066 if (RT_SUCCESS(rc))
1067 {
1068 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
1069 if (RT_SUCCESS(rc))
1070 {
1071 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
1072 Assert(pDevExt->PhysIrqAckEvents != 0);
1073
1074 rc = vgdrvReportGuestInfo(enmOSType);
1075 if (RT_SUCCESS(rc))
1076 {
1077 /*
1078 * Set the fixed event and make sure the host doesn't have any lingering
1079 * the guest capabilities or mouse status bits set.
1080 */
1081 rc = vgdrvResetEventFilterOnHost(pDevExt, pDevExt->fFixedEvents);
1082 if (RT_SUCCESS(rc))
1083 {
1084 rc = vgdrvResetCapabilitiesOnHost(pDevExt);
1085 if (RT_SUCCESS(rc))
1086 {
1087 rc = vgdrvResetMouseStatusOnHost(pDevExt);
1088 if (RT_SUCCESS(rc))
1089 {
1090 /*
1091 * Initialize stuff which may fail without requiring the driver init to fail.
1092 */
1093 vgdrvInitFixateGuestMappings(pDevExt);
1094 vgdrvHeartbeatInit(pDevExt);
1095
1096 /*
1097 * Done!
1098 */
1099 rc = vgdrvReportDriverStatus(true /* Driver is active */);
1100 if (RT_FAILURE(rc))
1101 LogRel(("VGDrvCommonInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
1102
1103 LogFlowFunc(("VGDrvCommonInitDevExt: returns success\n"));
1104 return VINF_SUCCESS;
1105 }
1106 LogRel(("VGDrvCommonInitDevExt: failed to clear mouse status: rc=%Rrc\n", rc));
1107 }
1108 else
1109 LogRel(("VGDrvCommonInitDevExt: failed to clear guest capabilities: rc=%Rrc\n", rc));
1110 }
1111 else
1112 LogRel(("VGDrvCommonInitDevExt: failed to set fixed event filter: rc=%Rrc\n", rc));
1113 }
1114 else
1115 LogRel(("VGDrvCommonInitDevExt: VBoxReportGuestInfo failed: rc=%Rrc\n", rc));
1116 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
1117 }
1118 else
1119 LogRel(("VGDrvCommonInitDevExt: VBoxGRAlloc failed: rc=%Rrc\n", rc));
1120
1121 VbglR0TerminatePrimary();
1122 }
1123 else
1124 LogRel(("VGDrvCommonInitDevExt: VbglInit failed: rc=%Rrc\n", rc));
1125
1126 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1127 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1128 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1129
1130#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1131 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1132 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1133#endif
1134 return rc; /* (failed) */
1135}
1136
1137
1138/**
1139 * Deletes all the items in a wait chain.
1140 * @param pList The head of the chain.
1141 */
1142static void vgdrvDeleteWaitList(PRTLISTNODE pList)
1143{
1144 while (!RTListIsEmpty(pList))
1145 {
1146 int rc2;
1147 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1148 RTListNodeRemove(&pWait->ListNode);
1149
1150 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1151 pWait->Event = NIL_RTSEMEVENTMULTI;
1152 pWait->pSession = NULL;
1153 RTMemFree(pWait);
1154 }
1155}
1156
1157
1158/**
1159 * Destroys the VBoxGuest device extension.
1160 *
1161 * The native code should call this before the driver is loaded,
1162 * but don't call this on shutdown.
1163 *
1164 * @param pDevExt The device extension.
1165 */
1166void VGDrvCommonDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1167{
1168 int rc2;
1169 Log(("VGDrvCommonDeleteDevExt:\n"));
1170 Log(("VBoxGuest: The additions driver is terminating.\n"));
1171
1172 /*
1173 * Stop and destroy HB timer and
1174 * disable host heartbeat checking.
1175 */
1176 if (pDevExt->pHeartbeatTimer)
1177 {
1178 RTTimerDestroy(pDevExt->pHeartbeatTimer);
1179 vgdrvHeartbeatHostConfigure(pDevExt, false);
1180 }
1181
1182 VbglGRFree(pDevExt->pReqGuestHeartbeat);
1183 pDevExt->pReqGuestHeartbeat = NULL;
1184
1185 /*
1186 * Clean up the bits that involves the host first.
1187 */
1188 vgdrvTermUnfixGuestMappings(pDevExt);
1189 if (!RTListIsEmpty(&pDevExt->SessionList))
1190 {
1191 LogRelFunc(("session list not empty!\n"));
1192 RTListInit(&pDevExt->SessionList);
1193 }
1194 /* Update the host flags (mouse status etc) not to reflect this session. */
1195 pDevExt->fFixedEvents = 0;
1196 vgdrvResetEventFilterOnHost(pDevExt, 0 /*fFixedEvents*/);
1197 vgdrvResetCapabilitiesOnHost(pDevExt);
1198 vgdrvResetMouseStatusOnHost(pDevExt);
1199
1200 vgdrvCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1201
1202 /*
1203 * Cleanup all the other resources.
1204 */
1205 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1206 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1207 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1208
1209 vgdrvDeleteWaitList(&pDevExt->WaitList);
1210#ifdef VBOX_WITH_HGCM
1211 vgdrvDeleteWaitList(&pDevExt->HGCMWaitList);
1212#endif
1213#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1214 vgdrvDeleteWaitList(&pDevExt->WakeUpList);
1215#endif
1216 vgdrvDeleteWaitList(&pDevExt->WokenUpList);
1217 vgdrvDeleteWaitList(&pDevExt->FreeList);
1218
1219 VbglR0TerminatePrimary();
1220
1221 pDevExt->pVMMDevMemory = NULL;
1222
1223 pDevExt->IOPortBase = 0;
1224 pDevExt->pIrqAckEvents = NULL;
1225
1226#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1227 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1228 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1229#endif
1230
1231}
1232
1233
1234/**
1235 * Creates a VBoxGuest user session.
1236 *
1237 * The native code calls this when a ring-3 client opens the device.
1238 * Use VGDrvCommonCreateKernelSession when a ring-0 client connects.
1239 *
1240 * @returns VBox status code.
1241 * @param pDevExt The device extension.
1242 * @param ppSession Where to store the session on success.
1243 */
1244int VGDrvCommonCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1245{
1246 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1247 if (RT_UNLIKELY(!pSession))
1248 {
1249 LogRel(("VGDrvCommonCreateUserSession: no memory!\n"));
1250 return VERR_NO_MEMORY;
1251 }
1252
1253 pSession->Process = RTProcSelf();
1254 pSession->R0Process = RTR0ProcHandleSelf();
1255 pSession->pDevExt = pDevExt;
1256 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1257 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1258 pDevExt->cSessions++;
1259 RTSpinlockRelease(pDevExt->SessionSpinlock);
1260
1261 *ppSession = pSession;
1262 LogFlow(("VGDrvCommonCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1263 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1264 return VINF_SUCCESS;
1265}
1266
1267
1268/**
1269 * Creates a VBoxGuest kernel session.
1270 *
1271 * The native code calls this when a ring-0 client connects to the device.
1272 * Use VGDrvCommonCreateUserSession when a ring-3 client opens the device.
1273 *
1274 * @returns VBox status code.
1275 * @param pDevExt The device extension.
1276 * @param ppSession Where to store the session on success.
1277 */
1278int VGDrvCommonCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1279{
1280 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1281 if (RT_UNLIKELY(!pSession))
1282 {
1283 LogRel(("VGDrvCommonCreateKernelSession: no memory!\n"));
1284 return VERR_NO_MEMORY;
1285 }
1286
1287 pSession->Process = NIL_RTPROCESS;
1288 pSession->R0Process = NIL_RTR0PROCESS;
1289 pSession->pDevExt = pDevExt;
1290 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1291 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1292 pDevExt->cSessions++;
1293 RTSpinlockRelease(pDevExt->SessionSpinlock);
1294
1295 *ppSession = pSession;
1296 LogFlow(("VGDrvCommonCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1297 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1298 return VINF_SUCCESS;
1299}
1300
1301
1302/**
1303 * Closes a VBoxGuest session.
1304 *
1305 * @param pDevExt The device extension.
1306 * @param pSession The session to close (and free).
1307 */
1308void VGDrvCommonCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1309{
1310#ifdef VBOX_WITH_HGCM
1311 unsigned i;
1312#endif
1313 LogFlow(("VGDrvCommonCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1314 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1315
1316 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1317 RTListNodeRemove(&pSession->ListNode);
1318 pDevExt->cSessions--;
1319 RTSpinlockRelease(pDevExt->SessionSpinlock);
1320 vgdrvAcquireSessionCapabilities(pDevExt, pSession, 0, UINT32_MAX, VBGL_IOC_AGC_FLAGS_DEFAULT, true /*fSessionTermination*/);
1321 vgdrvSetSessionCapabilities(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/,
1322 NULL /*pfSessionCaps*/, NULL /*pfGlobalCaps*/, true /*fSessionTermination*/);
1323 vgdrvSetSessionEventFilter(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1324 vgdrvSetSessionMouseStatus(pDevExt, pSession, 0 /*fOrMask*/, UINT32_MAX /*fNotMask*/, true /*fSessionTermination*/);
1325
1326 vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
1327
1328#ifdef VBOX_WITH_HGCM
1329 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1330 if (pSession->aHGCMClientIds[i])
1331 {
1332 uint32_t idClient = pSession->aHGCMClientIds[i];
1333 pSession->aHGCMClientIds[i] = 0;
1334 Log(("VGDrvCommonCloseSession: disconnecting client id %#RX32\n", idClient));
1335 VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1336 }
1337#endif
1338
1339 pSession->pDevExt = NULL;
1340 pSession->Process = NIL_RTPROCESS;
1341 pSession->R0Process = NIL_RTR0PROCESS;
1342 vgdrvCloseMemBalloon(pDevExt, pSession);
1343 RTMemFree(pSession);
1344}
1345
1346
1347/**
1348 * Allocates a wait-for-event entry.
1349 *
1350 * @returns The wait-for-event entry.
1351 * @param pDevExt The device extension.
1352 * @param pSession The session that's allocating this. Can be NULL.
1353 */
1354static PVBOXGUESTWAIT vgdrvWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1355{
1356 /*
1357 * Allocate it one way or the other.
1358 */
1359 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1360 if (pWait)
1361 {
1362 RTSpinlockAcquire(pDevExt->EventSpinlock);
1363
1364 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1365 if (pWait)
1366 RTListNodeRemove(&pWait->ListNode);
1367
1368 RTSpinlockRelease(pDevExt->EventSpinlock);
1369 }
1370 if (!pWait)
1371 {
1372 int rc;
1373
1374 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1375 if (!pWait)
1376 {
1377 LogRelMax(32, ("vgdrvWaitAlloc: out-of-memory!\n"));
1378 return NULL;
1379 }
1380
1381 rc = RTSemEventMultiCreate(&pWait->Event);
1382 if (RT_FAILURE(rc))
1383 {
1384 LogRelMax(32, ("vgdrvWaitAlloc: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1385 RTMemFree(pWait);
1386 return NULL;
1387 }
1388
1389 pWait->ListNode.pNext = NULL;
1390 pWait->ListNode.pPrev = NULL;
1391 }
1392
1393 /*
1394 * Zero members just as an precaution.
1395 */
1396 pWait->fReqEvents = 0;
1397 pWait->fResEvents = 0;
1398#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1399 pWait->fPendingWakeUp = false;
1400 pWait->fFreeMe = false;
1401#endif
1402 pWait->pSession = pSession;
1403#ifdef VBOX_WITH_HGCM
1404 pWait->pHGCMReq = NULL;
1405#endif
1406 RTSemEventMultiReset(pWait->Event);
1407 return pWait;
1408}
1409
1410
1411/**
1412 * Frees the wait-for-event entry.
1413 *
1414 * The caller must own the wait spinlock !
1415 * The entry must be in a list!
1416 *
1417 * @param pDevExt The device extension.
1418 * @param pWait The wait-for-event entry to free.
1419 */
1420static void vgdrvWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1421{
1422 pWait->fReqEvents = 0;
1423 pWait->fResEvents = 0;
1424#ifdef VBOX_WITH_HGCM
1425 pWait->pHGCMReq = NULL;
1426#endif
1427#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1428 Assert(!pWait->fFreeMe);
1429 if (pWait->fPendingWakeUp)
1430 pWait->fFreeMe = true;
1431 else
1432#endif
1433 {
1434 RTListNodeRemove(&pWait->ListNode);
1435 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1436 }
1437}
1438
1439
1440/**
1441 * Frees the wait-for-event entry.
1442 *
1443 * @param pDevExt The device extension.
1444 * @param pWait The wait-for-event entry to free.
1445 */
1446static void vgdrvWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1447{
1448 RTSpinlockAcquire(pDevExt->EventSpinlock);
1449 vgdrvWaitFreeLocked(pDevExt, pWait);
1450 RTSpinlockRelease(pDevExt->EventSpinlock);
1451}
1452
1453
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 * At least on Windows this function can be invoked concurrently from
 * different VCPUs. So, be thread-safe.
 *
 * @param   pDevExt     The device extension.
 */
void VGDrvCommonWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness check is fine: a missed entry will be picked up by
       the next invocation, and the real work below is done under the lock. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Prevent other threads from accessing pWait when spinlock is released. */
            RTListNodeRemove(&pWait->ListNode);

            /* fPendingWakeUp tells vgdrvWaitFreeLocked to defer recycling
               while we are signalling outside the lock. */
            pWait->fPendingWakeUp = true;
            RTSpinlockRelease(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            /* Retake the lock and park the entry on the woken-up list; if the
               waiter tried to free it meanwhile, finish that job now. */
            RTSpinlockAcquire(pDevExt->EventSpinlock);
            Assert(pWait->ListNode.pNext == NULL && pWait->ListNode.pPrev == NULL);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            pWait->fPendingWakeUp = false;
            if (RT_LIKELY(!pWait->fFreeMe))
            { /* likely */ }
            else
            {
                pWait->fFreeMe = false;
                vgdrvWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1501
1502
1503/**
1504 * Implements the fast (no input or output) type of IOCtls.
1505 *
1506 * This is currently just a placeholder stub inherited from the support driver code.
1507 *
1508 * @returns VBox status code.
1509 * @param iFunction The IOCtl function number.
1510 * @param pDevExt The device extension.
1511 * @param pSession The session.
1512 */
1513int VGDrvCommonIoCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1514{
1515 LogFlow(("VGDrvCommonIoCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1516
1517 NOREF(iFunction);
1518 NOREF(pDevExt);
1519 NOREF(pSession);
1520 return VERR_NOT_SUPPORTED;
1521}
1522
1523
1524/**
1525 * Gets the driver I/O control interface version, maybe adjusting it for
1526 * backwards compatibility.
1527 *
1528 * The adjusting is currently not implemented as we only have one major I/O
1529 * control interface version out there to support. This is something we will
1530 * implement as needed.
1531 *
1532 * returns IPRT status code.
1533 * @param pDevExt The device extension.
1534 * @param pSession The session.
1535 * @param pReq The request info.
1536 */
1537static int vgdrvIoCtl_DriverVersionInfo(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCDRIVERVERSIONINFO pReq)
1538{
1539 int rc;
1540 LogFlow(("VBGL_IOCTL_DRIVER_VERSION_INFO: uReqVersion=%#x uMinVersion=%#x uReserved1=%#x uReserved2=%#x\n",
1541 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved1, pReq->u.In.uReserved2));
1542 RT_NOREF2(pDevExt, pSession);
1543
1544 /*
1545 * Input validation.
1546 */
1547 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1548 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1549 {
1550 /*
1551 * Match the version.
1552 * The current logic is very simple, match the major interface version.
1553 */
1554 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1555 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1556 rc = VINF_SUCCESS;
1557 else
1558 {
1559 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1560 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1561 rc = VERR_VERSION_MISMATCH;
1562 }
1563 }
1564 else
1565 {
1566 LogRel(("VBGL_IOCTL_DRIVER_VERSION_INFO: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1567 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1568 rc = VERR_INVALID_PARAMETER;
1569 }
1570
1571 pReq->u.Out.uSessionVersion = RT_SUCCESS(rc) ? VBGL_IOC_VERSION : UINT32_MAX;
1572 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1573 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1574 pReq->u.Out.uReserved1 = 0;
1575 pReq->u.Out.uReserved2 = 0;
1576 return rc;
1577}
1578
1579
1580/**
1581 * Similar to vgdrvIoCtl_DriverVersionInfo, except its for IDC.
1582 *
1583 * returns IPRT status code.
1584 * @param pDevExt The device extension.
1585 * @param pSession The session.
1586 * @param pReq The request info.
1587 */
1588static int vgdrvIoCtl_IdcConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCCONNECT pReq)
1589{
1590 int rc;
1591 LogFlow(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x uReqVersion=%#x uMinVersion=%#x uReserved=%#x\n",
1592 pReq->u.In.u32MagicCookie, pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, pReq->u.In.uReserved));
1593 Assert(pSession != NULL);
1594 RT_NOREF(pDevExt);
1595
1596 /*
1597 * Input validation.
1598 */
1599 if (pReq->u.In.u32MagicCookie == VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE)
1600 {
1601 if ( pReq->u.In.uMinVersion <= pReq->u.In.uReqVersion
1602 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(pReq->u.In.uReqVersion))
1603 {
1604 /*
1605 * Match the version.
1606 * The current logic is very simple, match the major interface version.
1607 */
1608 if ( pReq->u.In.uMinVersion <= VBGL_IOC_VERSION
1609 && RT_HI_U16(pReq->u.In.uMinVersion) == RT_HI_U16(VBGL_IOC_VERSION))
1610 {
1611 pReq->u.Out.pvSession = pSession;
1612 pReq->u.Out.uSessionVersion = VBGL_IOC_VERSION;
1613 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1614 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1615 pReq->u.Out.uReserved1 = 0;
1616 pReq->u.Out.pvReserved2 = NULL;
1617 return VINF_SUCCESS;
1618
1619 }
1620 LogRel(("VBGL_IOCTL_IDC_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1621 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, VBGL_IOC_VERSION));
1622 rc = VERR_VERSION_MISMATCH;
1623 }
1624 else
1625 {
1626 LogRel(("VBGL_IOCTL_IDC_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1627 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1628 rc = VERR_INVALID_PARAMETER;
1629 }
1630
1631 pReq->u.Out.pvSession = NULL;
1632 pReq->u.Out.uSessionVersion = UINT32_MAX;
1633 pReq->u.Out.uDriverVersion = VBGL_IOC_VERSION;
1634 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1635 pReq->u.Out.uReserved1 = 0;
1636 pReq->u.Out.pvReserved2 = NULL;
1637 }
1638 else
1639 {
1640 LogRel(("VBGL_IOCTL_IDC_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1641 pReq->u.In.u32MagicCookie, VBGL_IOCTL_IDC_CONNECT_MAGIC_COOKIE));
1642 rc = VERR_INVALID_PARAMETER;
1643 }
1644 return rc;
1645}
1646
1647
1648/**
1649 * Counterpart to vgdrvIoCtl_IdcConnect, destroys the session.
1650 *
1651 * returns IPRT status code.
1652 * @param pDevExt The device extension.
1653 * @param pSession The session.
1654 * @param pReq The request info.
1655 */
1656static int vgdrvIoCtl_IdcDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCIDCDISCONNECT pReq)
1657{
1658 LogFlow(("VBGL_IOCTL_IDC_DISCONNECT: pvSession=%p vs pSession=%p\n", pReq->u.In.pvSession, pSession));
1659 RT_NOREF(pDevExt);
1660 Assert(pSession != NULL);
1661
1662 if (pReq->u.In.pvSession == pSession)
1663 {
1664 VGDrvCommonCloseSession(pDevExt, pSession);
1665 return VINF_SUCCESS;
1666 }
1667 LogRel(("VBGL_IOCTL_IDC_DISCONNECT: In.pvSession=%p is not equal to pSession=%p!\n", pReq->u.In.pvSession, pSession));
1668 return VERR_INVALID_PARAMETER;
1669}
1670
1671
1672/**
1673 * Return the VMM device I/O info.
1674 *
1675 * returns IPRT status code.
1676 * @param pDevExt The device extension.
1677 * @param pInfo The request info.
1678 * @note Ring-0 only, caller checked.
1679 */
1680static int vgdrvIoCtl_GetVMMDevIoInfo(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCGETVMMDEVIOINFO pInfo)
1681{
1682 LogFlow(("VBGL_IOCTL_GET_VMMDEV_IO_INFO\n"));
1683
1684 pInfo->u.Out.IoPort = pDevExt->IOPortBase;
1685 pInfo->u.Out.pvVmmDevMapping = pDevExt->pVMMDevMemory;
1686 pInfo->u.Out.auPadding[0] = 0;
1687#if HC_ARCH_BITS != 32
1688 pInfo->u.Out.auPadding[1] = 0;
1689 pInfo->u.Out.auPadding[2] = 0;
1690#endif
1691 return VINF_SUCCESS;
1692}
1693
1694
1695/**
1696 * Set the callback for the kernel mouse handler.
1697 *
1698 * returns IPRT status code.
1699 * @param pDevExt The device extension.
1700 * @param pNotify The new callback information.
1701 */
1702int vgdrvIoCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCSETMOUSENOTIFYCALLBACK pNotify)
1703{
1704 LogFlow(("VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK: pfnNotify=%p pvUser=%p\n", pNotify->u.In.pfnNotify, pNotify->u.In.pvUser));
1705
1706#ifdef VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT
1707 VGDrvNativeSetMouseNotifyCallback(pDevExt, pNotify);
1708#else
1709 RTSpinlockAcquire(pDevExt->EventSpinlock);
1710 pDevExt->pfnMouseNotifyCallback = pNotify->u.In.pfnNotify;
1711 pDevExt->pvMouseNotifyCallbackArg = pNotify->u.In.pvUser;
1712 RTSpinlockRelease(pDevExt->EventSpinlock);
1713#endif
1714 return VINF_SUCCESS;
1715}
1716
1717
1718/**
1719 * Worker vgdrvIoCtl_WaitEvent.
1720 *
1721 * The caller enters the spinlock, we leave it.
1722 *
1723 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1724 */
1725DECLINLINE(int) vbdgCheckWaitEventCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1726 PVBGLIOCWAITFOREVENTS pInfo, int iEvent, const uint32_t fReqEvents)
1727{
1728 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1729 if (fMatches & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
1730 fMatches &= vgdrvGetAllowedEventMaskForSession(pDevExt, pSession);
1731 if (fMatches || pSession->fPendingCancelWaitEvents)
1732 {
1733 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1734 RTSpinlockRelease(pDevExt->EventSpinlock);
1735
1736 pInfo->u.Out.fEvents = fMatches;
1737 if (fReqEvents & ~((uint32_t)1 << iEvent))
1738 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
1739 else
1740 LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
1741 pSession->fPendingCancelWaitEvents = false;
1742 return VINF_SUCCESS;
1743 }
1744
1745 RTSpinlockRelease(pDevExt->EventSpinlock);
1746 return VERR_TIMEOUT;
1747}
1748
1749
/**
 * Waits for VMMDev events to be posted, implementing the wait-for-events
 * I/O control.
 *
 * Checks the condition up front, and again after inserting the wait entry,
 * before actually blocking on the entry's event semaphore.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_SEM_DESTROYED, VERR_NO_MEMORY, VERR_INVALID_PARAMETER).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The request packet; In and Out share storage.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int vgdrvIoCtl_WaitForEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    PVBGLIOCWAITFOREVENTS pInfo, bool fInterruptible)
{
    uint32_t const cMsTimeout = pInfo->u.In.cMsTimeOut;
    const uint32_t fReqEvents = pInfo->u.In.fEvents;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u.Out.fEvents = 0; /* Note! This overwrites pInfo->u.In.* fields! */

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBOXGUEST_IOCTL_WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     * (vbdgCheckWaitEventCondition releases the spinlock on both of its paths.)
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll: don't block if nothing was pending. */
    if (!cMsTimeout)
    {
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = vgdrvWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = vbdgCheckWaitEventCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        vgdrvWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMsTimeout == UINT32_MAX ? RT_INDEFINITE_WAIT : cMsTimeout);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    vgdrvWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     * (fResEvents == UINT32_MAX is the cancel marker set by
     * vgdrvIoCtl_CancelAllWaitEvents.)
     */
    if (   fResEvents
        && fResEvents != UINT32_MAX)
    {
        pInfo->u.Out.fEvents = fResEvents;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x\n", pInfo->u.Out.fEvents));
        else
            LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %#x/%d\n", pInfo->u.Out.fEvents, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        rc = VERR_INTERRUPTED;
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken up successfully but with no events recorded - shouldn't happen. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        LogFlow(("VBOXGUEST_IOCTL_WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1859
1860
/**
 * Cancels all pending wait-for-event calls belonging to the given session,
 * waking each one with the UINT32_MAX cancel marker.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 *
 * @todo the semantics of this IoCtl have been tightened, so that no calls to
 *       VBOXGUEST_IOCTL_WAITEVENT are allowed in a session after it has been
 *       called.  Change the code to make calls to VBOXGUEST_IOCTL_WAITEVENT
 *       made after that to return VERR_INTERRUPTED or something appropriate. */
static int vgdrvIoCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was at least one WAITEVENT in process for this session?  If not we
     * set a flag that the next call should be interrupted immediately.  This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    LogFlow(("VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            /* UINT32_MAX in fResEvents is the cancel marker the waiter
               translates into VERR_INTERRUPTED. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    /* All signal statuses were OR'ed together above; they are all expected to be 0. */
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Deferred mode: do the actual signalling outside the list walk. */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1909
1910
1911/**
1912 * Checks if the VMM request is allowed in the context of the given session.
1913 *
1914 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1915 * @param pDevExt The device extension.
1916 * @param pSession The calling session.
1917 * @param enmType The request type.
1918 * @param pReqHdr The request.
1919 */
1920static int vgdrvCheckIfVmmReqIsAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1921 VMMDevRequestHeader const *pReqHdr)
1922{
1923 /*
1924 * Categorize the request being made.
1925 */
1926 /** @todo This need quite some more work! */
1927 enum
1928 {
1929 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1930 } enmRequired;
1931 RT_NOREF1(pDevExt);
1932
1933 switch (enmType)
1934 {
1935 /*
1936 * Deny access to anything we don't know or provide specialized I/O controls for.
1937 */
1938#ifdef VBOX_WITH_HGCM
1939 case VMMDevReq_HGCMConnect:
1940 case VMMDevReq_HGCMDisconnect:
1941# ifdef VBOX_WITH_64_BITS_GUESTS
1942 case VMMDevReq_HGCMCall32:
1943 case VMMDevReq_HGCMCall64:
1944# else
1945 case VMMDevReq_HGCMCall:
1946# endif /* VBOX_WITH_64_BITS_GUESTS */
1947 case VMMDevReq_HGCMCancel:
1948 case VMMDevReq_HGCMCancel2:
1949#endif /* VBOX_WITH_HGCM */
1950 case VMMDevReq_SetGuestCapabilities:
1951 default:
1952 enmRequired = kLevel_NoOne;
1953 break;
1954
1955 /*
1956 * There are a few things only this driver can do (and it doesn't use
1957 * the VMMRequst I/O control route anyway, but whatever).
1958 */
1959 case VMMDevReq_ReportGuestInfo:
1960 case VMMDevReq_ReportGuestInfo2:
1961 case VMMDevReq_GetHypervisorInfo:
1962 case VMMDevReq_SetHypervisorInfo:
1963 case VMMDevReq_RegisterPatchMemory:
1964 case VMMDevReq_DeregisterPatchMemory:
1965 case VMMDevReq_GetMemBalloonChangeRequest:
1966 enmRequired = kLevel_OnlyVBoxGuest;
1967 break;
1968
1969 /*
1970 * Trusted users apps only.
1971 */
1972 case VMMDevReq_QueryCredentials:
1973 case VMMDevReq_ReportCredentialsJudgement:
1974 case VMMDevReq_RegisterSharedModule:
1975 case VMMDevReq_UnregisterSharedModule:
1976 case VMMDevReq_WriteCoreDump:
1977 case VMMDevReq_GetCpuHotPlugRequest:
1978 case VMMDevReq_SetCpuHotPlugStatus:
1979 case VMMDevReq_CheckSharedModules:
1980 case VMMDevReq_GetPageSharingStatus:
1981 case VMMDevReq_DebugIsPageShared:
1982 case VMMDevReq_ReportGuestStats:
1983 case VMMDevReq_ReportGuestUserState:
1984 case VMMDevReq_GetStatisticsChangeRequest:
1985 case VMMDevReq_ChangeMemBalloon:
1986 enmRequired = kLevel_TrustedUsers;
1987 break;
1988
1989 /*
1990 * Anyone.
1991 */
1992 case VMMDevReq_GetMouseStatus:
1993 case VMMDevReq_SetMouseStatus:
1994 case VMMDevReq_SetPointerShape:
1995 case VMMDevReq_GetHostVersion:
1996 case VMMDevReq_Idle:
1997 case VMMDevReq_GetHostTime:
1998 case VMMDevReq_SetPowerStatus:
1999 case VMMDevReq_AcknowledgeEvents:
2000 case VMMDevReq_CtlGuestFilterMask:
2001 case VMMDevReq_ReportGuestStatus:
2002 case VMMDevReq_GetDisplayChangeRequest:
2003 case VMMDevReq_VideoModeSupported:
2004 case VMMDevReq_GetHeightReduction:
2005 case VMMDevReq_GetDisplayChangeRequest2:
2006 case VMMDevReq_VideoModeSupported2:
2007 case VMMDevReq_VideoAccelEnable:
2008 case VMMDevReq_VideoAccelFlush:
2009 case VMMDevReq_VideoSetVisibleRegion:
2010 case VMMDevReq_GetDisplayChangeRequestEx:
2011 case VMMDevReq_GetSeamlessChangeRequest:
2012 case VMMDevReq_GetVRDPChangeRequest:
2013 case VMMDevReq_LogString:
2014 case VMMDevReq_GetSessionId:
2015 enmRequired = kLevel_AllUsers;
2016 break;
2017
2018 /*
2019 * Depends on the request parameters...
2020 */
2021 /** @todo this have to be changed into an I/O control and the facilities
2022 * tracked in the session so they can automatically be failed when the
2023 * session terminates without reporting the new status.
2024 *
2025 * The information presented by IGuest is not reliable without this! */
2026 case VMMDevReq_ReportGuestCapabilities:
2027 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
2028 {
2029 case VBoxGuestFacilityType_All:
2030 case VBoxGuestFacilityType_VBoxGuestDriver:
2031 enmRequired = kLevel_OnlyVBoxGuest;
2032 break;
2033 case VBoxGuestFacilityType_VBoxService:
2034 enmRequired = kLevel_TrustedUsers;
2035 break;
2036 case VBoxGuestFacilityType_VBoxTrayClient:
2037 case VBoxGuestFacilityType_Seamless:
2038 case VBoxGuestFacilityType_Graphics:
2039 default:
2040 enmRequired = kLevel_AllUsers;
2041 break;
2042 }
2043 break;
2044 }
2045
2046 /*
2047 * Check against the session.
2048 */
2049 switch (enmRequired)
2050 {
2051 default:
2052 case kLevel_NoOne:
2053 break;
2054 case kLevel_OnlyVBoxGuest:
2055 case kLevel_OnlyKernel:
2056 if (pSession->R0Process == NIL_RTR0PROCESS)
2057 return VINF_SUCCESS;
2058 break;
2059 case kLevel_TrustedUsers:
2060 case kLevel_AllUsers:
2061 return VINF_SUCCESS;
2062 }
2063
2064 return VERR_PERMISSION_DENIED;
2065}
2066
/**
 * Handles the generic VMMDev request I/O control (VBOXGUEST_IOCTL_VMMREQUEST).
 *
 * Validates the request header, checks session permissions, copies the request
 * into the physical memory heap, performs it, and copies the result back.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pReqHdr     The request buffer (input and output).
 * @param   cbData      Size of the buffer @a pReqHdr points to.
 */
static int vgdrvIoCtl_VMMDevRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                    VMMDevRequestHeader *pReqHdr, size_t cbData)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlow(("VBOXGUEST_IOCTL_VMMREQUEST: type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBOXGUEST_IOCTL_VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Reject requests the session isn't privileged enough for. */
    rc = vgdrvCheckIfVmmReqIsAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);
    Assert(pReqCopy->reserved1 == cbReq);
    pReqCopy->reserved1 = 0; /* VGDrvCommonIoCtl or caller sets cbOut, so clear it. */

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        pReqHdr->reserved1 = cbReq; /* preserve cbOut */
    }
    else if (RT_FAILURE(rc))
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* The transport succeeded but the request itself failed on the host. */
        Log(("VBOXGUEST_IOCTL_VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
2151
2152
2153#ifdef VBOX_WITH_HGCM
2154
2155AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2156
/**
 * Worker common to vgdrvHgcmAsyncWaitCallback and
 * vgdrvHgcmAsyncWaitCallbackInterruptible.
 *
 * Blocks until VBOX_HGCM_REQ_DONE is set in the request header, or until the
 * wait times out, is interrupted (interruptible mode only), or the semaphore
 * is destroyed (driver unload).
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_INTERRUPTED, VERR_TIMEOUT,
 *          VERR_SEM_DESTROYED, or other wait failure).
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait can be interrupted.
 * @param   cMillies        The wait timeout in milliseconds.
 */
static int vgdrvHgcmAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                            bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pWait = vgdrvWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of wait items; briefly sleep and poll again. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        vgdrvWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc; /* Driver is unloading; don't touch pWait or the lists. */

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("vgdrvHgcmAsyncWaitCallback: wait failed! %Rrc\n", rc));

    vgdrvWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2225
2226
2227/**
2228 * This is a callback for dealing with async waits.
2229 *
2230 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2231 */
2232static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2233{
2234 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2235 LogFlow(("vgdrvHgcmAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2236 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2237 false /* fInterruptible */, u32User /* cMillies */);
2238}
2239
2240
2241/**
2242 * This is a callback for dealing with async waits with a timeout.
2243 *
2244 * It operates in a manner similar to vgdrvIoCtl_WaitEvent.
2245 */
2246static DECLCALLBACK(int) vgdrvHgcmAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2247{
2248 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2249 LogFlow(("vgdrvHgcmAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2250 return vgdrvHgcmAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr, pDevExt,
2251 true /* fInterruptible */, u32User /* cMillies */);
2252}
2253
2254
/**
 * Handles the HGCM connect I/O control.
 *
 * On success the new client id is recorded in the session's client id table
 * so it can be cleaned up when the session closes.
 *
 * @returns VBox status code.  VERR_TOO_MANY_OPEN_FILES if the session's
 *          client id table is full (the connection is rolled back).
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       Connect parameters (in) and resulting client id (out).
 */
static int vgdrvIoCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCONNECT pInfo)
{
    int rc;
    HGCMCLIENTID idClient = 0;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: %.128s\n",
         pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->u.In.Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->u.In.Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(&pInfo->u.In.Loc, &idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    Log(("VBOXGUEST_IOCTL_HGCM_CONNECT: idClient=%RX32 (rc=%Rrc)\n", idClient, rc));
    if (RT_SUCCESS(rc))
    {
        /*
         * Append the client id to the client id table.
         * If the table has somehow become filled up, we'll disconnect the session.
         */
        unsigned i;
        RTSpinlockAcquire(pDevExt->SessionSpinlock);
        for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
            if (!pSession->aHGCMClientIds[i])
            {
                pSession->aHGCMClientIds[i] = idClient;
                break;
            }
        RTSpinlockRelease(pDevExt->SessionSpinlock);
        if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
        {
            /* No free slot: undo the connection so we don't leak the client. */
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
            VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);

            pInfo->u.Out.idClient = 0;
            return VERR_TOO_MANY_OPEN_FILES;
        }
    }
    pInfo->u.Out.idClient = idClient;
    return rc;
}
2298
2299
/**
 * Handles the HGCM disconnect I/O control.
 *
 * @returns VBox status code.  VERR_INVALID_HANDLE if the client id is not
 *          registered with this session.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   pInfo       Input: the client id to disconnect.
 */
static int vgdrvIoCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMDISCONNECT pInfo)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     * (UINT32_MAX marks the slot as "disconnect in progress".)
     */
    int rc;
    const uint32_t idClient = pInfo->u.In.idClient;
    unsigned i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == idClient)
        {
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglR0HGCMInternalDisconnect call will invoke the callback if the
     * HGCM call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: idClient=%RX32\n", idClient));
    rc = VbglR0HGCMInternalDisconnect(idClient, vgdrvHgcmAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    LogFlow(("VBOXGUEST_IOCTL_HGCM_DISCONNECT: rc=%Rrc\n", rc));

    /* Update the client id array according to the result: free the slot on
       success, restore the id on failure. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) ? 0 : idClient;
    RTSpinlockRelease(pDevExt->SessionSpinlock);

    return rc;
}
2339
2340
/**
 * Worker for the HGCM call I/O controls.
 *
 * Validates the parameter count and buffer size, checks that the client id
 * belongs to the session, and forwards the call to the VBGL HGCM layer.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The HGCM call request.
 * @param   cMillies        Call timeout in milliseconds.
 * @param   fInterruptible  Whether the call may be interrupted.
 * @param   f32bit          Set if this is a 32-bit call from a 64-bit kernel
 *                          (only relevant on AMD64).
 * @param   fUserData       Set if the parameter data is user-mode addresses.
 * @param   cbExtra         Size of any extra data preceding pInfo in the buffer.
 * @param   cbData          Total size of the buffer.
 */
static int vgdrvIoCtl_HGCMCallInner(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
                                    uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
                                    size_t cbExtra, size_t cbData)
{
    const uint32_t u32ClientId = pInfo->u32ClientID;
    uint32_t fFlags;
    size_t cbActual;
    unsigned i;
    int rc;

    /*
     * Some more validations.
     */
    if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
        return VERR_INVALID_PARAMETER;
    }

    /* The buffer must hold the header plus one parameter struct per cParms. */
    cbActual = cbExtra + sizeof(*pInfo);
#ifdef RT_ARCH_AMD64
    if (f32bit)
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
    else
#endif
        cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
    if (cbData < cbActual)
    {
        LogRel(("VBOXGUEST_IOCTL_HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
                cbData, cbData, cbActual, cbActual));
        return VERR_INVALID_PARAMETER;
    }
    pInfo->Hdr.cbOut = (uint32_t)cbActual;

    /*
     * Validate the client id.
     */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
            break;
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
    {
        LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMCall call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. This function can
     * deal with cancelled requests, so we let user more requests
     * be interruptible (should add a flag for this later I guess).
     */
    LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
    fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
    uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
#ifdef RT_ARCH_AMD64
    if (f32bit)
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    else
#endif
    {
        if (fInterruptible)
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallbackInterruptible, pDevExt, cMillies);
        else
            rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, vgdrvHgcmAsyncWaitCallback, pDevExt, cMillies);
    }
    if (RT_SUCCESS(rc))
    {
        /* Transport succeeded; the actual call status is in the header. */
        rc = pInfo->Hdr.rc;
        LogFlow(("VBOXGUEST_IOCTL_HGCM_CALL: result=%Rrc\n", rc));
    }
    else
    {
        if (   rc != VERR_INTERRUPTED
            && rc != VERR_TIMEOUT)
            LogRelMax(32, ("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
        else
            Log(("VBOXGUEST_IOCTL_HGCM_CALL: %s Failed. rc=%Rrc (Hdr.rc=%Rrc).\n", f32bit ? "32" : "64", rc, pInfo->Hdr.rc));
    }
    return rc;
}
2429
2430
2431static int vgdrvIoCtl_HGCMCallWrapper(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCHGCMCALL pInfo,
2432 bool f32bit, bool fUserData, size_t cbData)
2433{
2434 return vgdrvIoCtl_HGCMCallInner(pDevExt, pSession, pInfo, pInfo->cMsTimeout,
2435 pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2436 f32bit, fUserData, 0 /*cbExtra*/, cbData);
2437}
2438
2439
2440#endif /* VBOX_WITH_HGCM */
2441
2442/**
2443 * Handle VBGL_IOCTL_CHECK_BALLOON from R3.
2444 *
2445 * Ask the host for the size of the balloon and try to set it accordingly. If
2446 * this approach fails because it's not supported, return with fHandleInR3 set
2447 * and let the user land supply memory we can lock via the other ioctl.
2448 *
2449 * @returns VBox status code.
2450 *
2451 * @param pDevExt The device extension.
2452 * @param pSession The session.
2453 * @param pInfo The output buffer.
2454 */
2455static int vgdrvIoCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHECKBALLOON pInfo)
2456{
2457 VMMDevGetMemBalloonChangeRequest *pReq;
2458 int rc;
2459
2460 LogFlow(("VBGL_IOCTL_CHECK_BALLOON:\n"));
2461 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2462 AssertRCReturn(rc, rc);
2463
2464 /*
2465 * The first user trying to query/change the balloon becomes the
2466 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
2467 */
2468 if ( pDevExt->MemBalloon.pOwner != pSession
2469 && pDevExt->MemBalloon.pOwner == NULL)
2470 pDevExt->MemBalloon.pOwner = pSession;
2471
2472 if (pDevExt->MemBalloon.pOwner == pSession)
2473 {
2474 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2475 if (RT_SUCCESS(rc))
2476 {
2477 /*
2478 * This is a response to that event. Setting this bit means that
2479 * we request the value from the host and change the guest memory
2480 * balloon according to this value.
2481 */
2482 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2483 rc = VbglGRPerform(&pReq->header);
2484 if (RT_SUCCESS(rc))
2485 {
2486 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2487 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2488
2489 pInfo->u.Out.cBalloonChunks = pReq->cBalloonChunks;
2490 pInfo->u.Out.fHandleInR3 = false;
2491 pInfo->u.Out.afPadding[0] = false;
2492 pInfo->u.Out.afPadding[1] = false;
2493 pInfo->u.Out.afPadding[2] = false;
2494
2495 rc = vgdrvSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->u.Out.fHandleInR3);
2496 /* Ignore various out of memory failures. */
2497 if ( rc == VERR_NO_MEMORY
2498 || rc == VERR_NO_PHYS_MEMORY
2499 || rc == VERR_NO_CONT_MEMORY)
2500 rc = VINF_SUCCESS;
2501 }
2502 else
2503 LogRel(("VBGL_IOCTL_CHECK_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2504 VbglGRFree(&pReq->header);
2505 }
2506 }
2507 else
2508 rc = VERR_PERMISSION_DENIED;
2509
2510 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2511 LogFlow(("VBGL_IOCTL_CHECK_BALLOON returns %Rrc\n", rc));
2512 return rc;
2513}
2514
2515
2516/**
2517 * Handle a request for changing the memory balloon.
2518 *
2519 * @returns VBox status code.
2520 *
2521 * @param pDevExt The device extention.
2522 * @param pSession The session.
2523 * @param pInfo The change request structure (input).
2524 */
2525static int vgdrvIoCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEBALLOON pInfo)
2526{
2527 int rc;
2528 LogFlow(("VBGL_IOCTL_CHANGE_BALLOON: fInflate=%RTbool u64ChunkAddr=%p\n", pInfo->u.In.fInflate, pInfo->u.In.pvChunk));
2529 if ( pInfo->u.In.abPadding[0]
2530 || pInfo->u.In.abPadding[1]
2531 || pInfo->u.In.abPadding[2]
2532 || pInfo->u.In.abPadding[3]
2533 || pInfo->u.In.abPadding[4]
2534 || pInfo->u.In.abPadding[5]
2535 || pInfo->u.In.abPadding[6]
2536#if ARCH_BITS == 32
2537 || pInfo->u.In.abPadding[7]
2538 || pInfo->u.In.abPadding[8]
2539 || pInfo->u.In.abPadding[9]
2540#endif
2541 )
2542 {
2543 Log(("VBGL_IOCTL_CHANGE_BALLOON: Padding isn't all zero: %.*Rhxs\n", sizeof(pInfo->u.In.abPadding), pInfo->u.In.abPadding));
2544 return VERR_INVALID_PARAMETER;
2545 }
2546
2547 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2548 AssertRCReturn(rc, rc);
2549
2550 if (!pDevExt->MemBalloon.fUseKernelAPI)
2551 {
2552 /*
2553 * The first user trying to query/change the balloon becomes the
2554 * owner and owns it until the session is closed (vgdrvCloseMemBalloon).
2555 */
2556 if ( pDevExt->MemBalloon.pOwner != pSession
2557 && pDevExt->MemBalloon.pOwner == NULL)
2558 pDevExt->MemBalloon.pOwner = pSession;
2559
2560 if (pDevExt->MemBalloon.pOwner == pSession)
2561 rc = vgdrvSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u.In.pvChunk, pInfo->u.In.fInflate != false);
2562 else
2563 rc = VERR_PERMISSION_DENIED;
2564 }
2565 else
2566 rc = VERR_PERMISSION_DENIED;
2567
2568 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2569 return rc;
2570}
2571
2572
2573/**
2574 * Handle a request for writing a core dump of the guest on the host.
2575 *
2576 * @returns VBox status code.
2577 *
2578 * @param pDevExt The device extension.
2579 * @param pInfo The output buffer.
2580 */
2581static int vgdrvIoCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, PVBGLIOCWRITECOREDUMP pInfo)
2582{
2583 VMMDevReqWriteCoreDump *pReq = NULL;
2584 int rc;
2585 LogFlow(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP\n"));
2586 RT_NOREF1(pDevExt);
2587
2588 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2589 if (RT_SUCCESS(rc))
2590 {
2591 pReq->fFlags = pInfo->u.In.fFlags;
2592 rc = VbglGRPerform(&pReq->header);
2593 if (RT_FAILURE(rc))
2594 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2595
2596 VbglGRFree(&pReq->header);
2597 }
2598 else
2599 Log(("VBOXGUEST_IOCTL_WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2600 sizeof(*pReq), sizeof(*pReq), rc));
2601 return rc;
2602}
2603
2604
2605/**
2606 * Guest backdoor logging.
2607 *
2608 * @returns VBox status code.
2609 *
2610 * @param pDevExt The device extension.
2611 * @param pch The log message (need not be NULL terminated).
2612 * @param cbData Size of the buffer.
2613 * @param fUserSession Copy of VBOXGUESTSESSION::fUserSession for the
2614 * call. True normal user, false root user.
2615 */
2616static int vgdrvIoCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, bool fUserSession)
2617{
2618 if (pDevExt->fLoggingEnabled)
2619 RTLogBackdoorPrintf("%.*s", cbData, pch);
2620 else if (!fUserSession)
2621 LogRel(("%.*s", cbData, pch));
2622 else
2623 Log(("%.*s", cbData, pch));
2624 return VINF_SUCCESS;
2625}
2626
2627
2628/** @name Guest Capabilities, Mouse Status and Event Filter
2629 * @{
2630 */
2631
2632/**
2633 * Clears a bit usage tracker (init time).
2634 *
2635 * @param pTracker The tracker to clear.
2636 */
2637static void vgdrvBitUsageTrackerClear(PVBOXGUESTBITUSAGETRACER pTracker)
2638{
2639 uint32_t iBit;
2640 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2641
2642 for (iBit = 0; iBit < 32; iBit++)
2643 pTracker->acPerBitUsage[iBit] = 0;
2644 pTracker->fMask = 0;
2645}
2646
2647
2648#ifdef VBOX_STRICT
2649/**
2650 * Checks that pTracker->fMask is correct and that the usage values are within
2651 * the valid range.
2652 *
2653 * @param pTracker The tracker.
2654 * @param cMax Max valid usage value.
2655 * @param pszWhat Identifies the tracker in assertions.
2656 */
2657static void vgdrvBitUsageTrackerCheckMask(PCVBOXGUESTBITUSAGETRACER pTracker, uint32_t cMax, const char *pszWhat)
2658{
2659 uint32_t fMask = 0;
2660 uint32_t iBit;
2661 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2662
2663 for (iBit = 0; iBit < 32; iBit++)
2664 if (pTracker->acPerBitUsage[iBit])
2665 {
2666 fMask |= RT_BIT_32(iBit);
2667 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2668 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2669 }
2670
2671 AssertMsg(fMask == pTracker->fMask, ("%s: %#x vs %#x\n", pszWhat, fMask, pTracker->fMask));
2672}
2673#endif
2674
2675
2676/**
2677 * Applies a change to the bit usage tracker.
2678 *
2679 *
2680 * @returns true if the mask changed, false if not.
2681 * @param pTracker The bit usage tracker.
2682 * @param fChanged The bits to change.
2683 * @param fPrevious The previous value of the bits.
2684 * @param cMax The max valid usage value for assertions.
2685 * @param pszWhat Identifies the tracker in assertions.
2686 */
2687static bool vgdrvBitUsageTrackerChange(PVBOXGUESTBITUSAGETRACER pTracker, uint32_t fChanged, uint32_t fPrevious,
2688 uint32_t cMax, const char *pszWhat)
2689{
2690 bool fGlobalChange = false;
2691 AssertCompile(sizeof(pTracker->acPerBitUsage) == 32 * sizeof(uint32_t));
2692
2693 while (fChanged)
2694 {
2695 uint32_t const iBit = ASMBitFirstSetU32(fChanged) - 1;
2696 uint32_t const fBitMask = RT_BIT_32(iBit);
2697 Assert(iBit < 32); Assert(fBitMask & fChanged);
2698
2699 if (fBitMask & fPrevious)
2700 {
2701 pTracker->acPerBitUsage[iBit] -= 1;
2702 AssertMsg(pTracker->acPerBitUsage[iBit] <= cMax,
2703 ("%s: acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2704 if (pTracker->acPerBitUsage[iBit] == 0)
2705 {
2706 fGlobalChange = true;
2707 pTracker->fMask &= ~fBitMask;
2708 }
2709 }
2710 else
2711 {
2712 pTracker->acPerBitUsage[iBit] += 1;
2713 AssertMsg(pTracker->acPerBitUsage[iBit] > 0 && pTracker->acPerBitUsage[iBit] <= cMax,
2714 ("pTracker->acPerBitUsage[%u]=%#x cMax=%#x\n", pszWhat, iBit, pTracker->acPerBitUsage[iBit], cMax));
2715 if (pTracker->acPerBitUsage[iBit] == 1)
2716 {
2717 fGlobalChange = true;
2718 pTracker->fMask |= fBitMask;
2719 }
2720 }
2721
2722 fChanged &= ~fBitMask;
2723 }
2724
2725#ifdef VBOX_STRICT
2726 vgdrvBitUsageTrackerCheckMask(pTracker, cMax, pszWhat);
2727#endif
2728 NOREF(pszWhat); NOREF(cMax);
2729 return fGlobalChange;
2730}
2731
2732
2733/**
2734 * Init and termination worker for resetting the (host) event filter on the host
2735 *
2736 * @returns VBox status code.
2737 * @param pDevExt The device extension.
2738 * @param fFixedEvents Fixed events (init time).
2739 */
2740static int vgdrvResetEventFilterOnHost(PVBOXGUESTDEVEXT pDevExt, uint32_t fFixedEvents)
2741{
2742 VMMDevCtlGuestFilterMask *pReq;
2743 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2744 if (RT_SUCCESS(rc))
2745 {
2746 pReq->u32NotMask = UINT32_MAX & ~fFixedEvents;
2747 pReq->u32OrMask = fFixedEvents;
2748 rc = VbglGRPerform(&pReq->header);
2749 if (RT_FAILURE(rc))
2750 LogRelFunc(("failed with rc=%Rrc\n", rc));
2751 VbglGRFree(&pReq->header);
2752 }
2753 RT_NOREF1(pDevExt);
2754 return rc;
2755}
2756
2757
2758/**
2759 * Changes the event filter mask for the given session.
2760 *
2761 * This is called in response to VBGL_IOCTL_CHANGE_FILTER_MASK as well as to do
2762 * session cleanup.
2763 *
2764 * @returns VBox status code.
2765 * @param pDevExt The device extension.
2766 * @param pSession The session.
2767 * @param fOrMask The events to add.
2768 * @param fNotMask The events to remove.
2769 * @param fSessionTermination Set if we're called by the session cleanup code.
2770 * This tweaks the error handling so we perform
2771 * proper session cleanup even if the host
2772 * misbehaves.
2773 *
2774 * @remarks Takes the session spinlock.
2775 */
2776static int vgdrvSetSessionEventFilter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2777 uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
2778{
2779 VMMDevCtlGuestFilterMask *pReq;
2780 uint32_t fChanged;
2781 uint32_t fPrevious;
2782 int rc;
2783
2784 /*
2785 * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
2786 */
2787 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
2788 if (RT_SUCCESS(rc))
2789 { /* nothing */ }
2790 else if (!fSessionTermination)
2791 {
2792 LogRel(("vgdrvSetSessionFilterMask: VbglGRAlloc failure: %Rrc\n", rc));
2793 return rc;
2794 }
2795 else
2796 pReq = NULL; /* Ignore failure, we must do session cleanup. */
2797
2798
2799 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2800
2801 /*
2802 * Apply the changes to the session mask.
2803 */
2804 fPrevious = pSession->fEventFilter;
2805 pSession->fEventFilter |= fOrMask;
2806 pSession->fEventFilter &= ~fNotMask;
2807
2808 /*
2809 * If anything actually changed, update the global usage counters.
2810 */
2811 fChanged = fPrevious ^ pSession->fEventFilter;
2812 LogFlow(("vgdrvSetSessionEventFilter: Session->fEventFilter: %#x -> %#x (changed %#x)\n",
2813 fPrevious, pSession->fEventFilter, fChanged));
2814 if (fChanged)
2815 {
2816 bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, fPrevious,
2817 pDevExt->cSessions, "EventFilterTracker");
2818
2819 /*
2820 * If there are global changes, update the event filter on the host.
2821 */
2822 if (fGlobalChange || pDevExt->fEventFilterHost == UINT32_MAX)
2823 {
2824 Assert(pReq || fSessionTermination);
2825 if (pReq)
2826 {
2827 pReq->u32OrMask = pDevExt->fFixedEvents | pDevExt->EventFilterTracker.fMask;
2828 if (pReq->u32OrMask == pDevExt->fEventFilterHost)
2829 rc = VINF_SUCCESS;
2830 else
2831 {
2832 pDevExt->fEventFilterHost = pReq->u32OrMask;
2833 pReq->u32NotMask = ~pReq->u32OrMask;
2834 rc = VbglGRPerform(&pReq->header);
2835 if (RT_FAILURE(rc))
2836 {
2837 /*
2838 * Failed, roll back (unless it's session termination time).
2839 */
2840 pDevExt->fEventFilterHost = UINT32_MAX;
2841 if (!fSessionTermination)
2842 {
2843 vgdrvBitUsageTrackerChange(&pDevExt->EventFilterTracker, fChanged, pSession->fEventFilter,
2844 pDevExt->cSessions, "EventFilterTracker");
2845 pSession->fEventFilter = fPrevious;
2846 }
2847 }
2848 }
2849 }
2850 else
2851 rc = VINF_SUCCESS;
2852 }
2853 }
2854
2855 RTSpinlockRelease(pDevExt->SessionSpinlock);
2856 if (pReq)
2857 VbglGRFree(&pReq->header);
2858 return rc;
2859}
2860
2861
2862/**
2863 * Handle VBGL_IOCTL_CHANGE_FILTER_MASK.
2864 *
2865 * @returns VBox status code.
2866 *
2867 * @param pDevExt The device extension.
2868 * @param pSession The session.
2869 * @param pInfo The request.
2870 */
2871static int vgdrvIoCtl_ChangeFilterMask(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCCHANGEFILTERMASK pInfo)
2872{
2873 LogFlow(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
2874
2875 if ((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
2876 {
2877 Log(("VBGL_IOCTL_CHANGE_FILTER_MASK: or=%#x not=%#x: Invalid masks!\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
2878 return VERR_INVALID_PARAMETER;
2879 }
2880
2881 return vgdrvSetSessionEventFilter(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask, false /*fSessionTermination*/);
2882}
2883
2884
2885/**
2886 * Init and termination worker for set mouse feature status to zero on the host.
2887 *
2888 * @returns VBox status code.
2889 * @param pDevExt The device extension.
2890 */
2891static int vgdrvResetMouseStatusOnHost(PVBOXGUESTDEVEXT pDevExt)
2892{
2893 VMMDevReqMouseStatus *pReq;
2894 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2895 if (RT_SUCCESS(rc))
2896 {
2897 pReq->mouseFeatures = 0;
2898 pReq->pointerXPos = 0;
2899 pReq->pointerYPos = 0;
2900 rc = VbglGRPerform(&pReq->header);
2901 if (RT_FAILURE(rc))
2902 LogRelFunc(("failed with rc=%Rrc\n", rc));
2903 VbglGRFree(&pReq->header);
2904 }
2905 RT_NOREF1(pDevExt);
2906 return rc;
2907}
2908
2909
2910/**
2911 * Changes the mouse status mask for the given session.
2912 *
 * This is called in response to VBGL_IOCTL_SET_MOUSE_STATUS as well as to
2914 * do session cleanup.
2915 *
2916 * @returns VBox status code.
2917 * @param pDevExt The device extension.
2918 * @param pSession The session.
2919 * @param fOrMask The status flags to add.
2920 * @param fNotMask The status flags to remove.
2921 * @param fSessionTermination Set if we're called by the session cleanup code.
2922 * This tweaks the error handling so we perform
2923 * proper session cleanup even if the host
2924 * misbehaves.
2925 *
2926 * @remarks Takes the session spinlock.
2927 */
static int vgdrvSetSessionMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                      uint32_t fOrMask, uint32_t fNotMask, bool fSessionTermination)
{
    VMMDevReqMouseStatus *pReq;
    uint32_t fChanged;
    uint32_t fPrevious;
    int rc;

    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        LogRel(("vgdrvSetSessionMouseStatus: VbglGRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /*
     * Apply the changes to the session mask.
     */
    fPrevious = pSession->fMouseStatus;
    pSession->fMouseStatus |= fOrMask;
    pSession->fMouseStatus &= ~fNotMask;

    /*
     * If anything actually changed, update the global usage counters.
     */
    fChanged = fPrevious ^ pSession->fMouseStatus;
    if (fChanged)
    {
        bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, fPrevious,
                                                        pDevExt->cSessions, "MouseStatusTracker");

        /*
         * If there are global changes, update the mouse status on the host.
         * (fMouseStatusHost == UINT32_MAX is the "host state unknown" sentinel
         * and forces a resend even without a global change.)
         */
        if (fGlobalChange || pDevExt->fMouseStatusHost == UINT32_MAX)
        {
            Assert(pReq || fSessionTermination);
            if (pReq)
            {
                pReq->mouseFeatures = pDevExt->MouseStatusTracker.fMask;
                if (pReq->mouseFeatures == pDevExt->fMouseStatusHost)
                    rc = VINF_SUCCESS; /* Host already has this mask; skip the round-trip. */
                else
                {
                    pDevExt->fMouseStatusHost = pReq->mouseFeatures;
                    pReq->pointerXPos = 0;
                    pReq->pointerYPos = 0;
                    rc = VbglGRPerform(&pReq->header);
                    if (RT_FAILURE(rc))
                    {
                        /*
                         * Failed, roll back (unless it's session termination time).
                         */
                        pDevExt->fMouseStatusHost = UINT32_MAX; /* Host state now unknown. */
                        if (!fSessionTermination)
                        {
                            vgdrvBitUsageTrackerChange(&pDevExt->MouseStatusTracker, fChanged, pSession->fMouseStatus,
                                                       pDevExt->cSessions, "MouseStatusTracker");
                            pSession->fMouseStatus = fPrevious;
                        }
                    }
                }
            }
            else
                rc = VINF_SUCCESS;
        }
    }

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglGRFree(&pReq->header);
    return rc;
}
3011
3012
3013/**
3014 * Sets the mouse status features for this session and updates them globally.
3015 *
3016 * @returns VBox status code.
3017 *
 * @param pDevExt The device extension.
3019 * @param pSession The session.
3020 * @param fFeatures New bitmap of enabled features.
3021 */
3022static int vgdrvIoCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
3023{
3024 LogFlow(("VBGL_IOCTL_SET_MOUSE_STATUS: features=%#x\n", fFeatures));
3025
3026 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
3027 return VERR_INVALID_PARAMETER;
3028
3029 return vgdrvSetSessionMouseStatus(pDevExt, pSession, fFeatures, ~fFeatures, false /*fSessionTermination*/);
3030}
3031
3032
3033/**
 * Return the mask of VMM device events that this session is allowed to see
 * (with regard to "acquire" mode guest capabilities).
3036 *
3037 * The events associated with guest capabilities in "acquire" mode will be
3038 * restricted to sessions which has acquired the respective capabilities.
3039 * If someone else tries to wait for acquired events, they won't be woken up
3040 * when the event becomes pending. Should some other thread in the session
3041 * acquire the capability while the corresponding event is pending, the waiting
 * thread will be woken up.
3043 *
3044 * @returns Mask of events valid for the given session.
3045 * @param pDevExt The device extension.
3046 * @param pSession The session.
3047 *
3048 * @remarks Needs only be called when dispatching events in the
3049 * VBOXGUEST_ACQUIRE_STYLE_EVENTS mask.
3050 */
3051static uint32_t vgdrvGetAllowedEventMaskForSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
3052{
3053 uint32_t fAcquireModeGuestCaps;
3054 uint32_t fAcquiredGuestCaps;
3055 uint32_t fAllowedEvents;
3056
3057 /*
3058 * Note! Reads pSession->fAcquiredGuestCaps and pDevExt->fAcquireModeGuestCaps
3059 * WITHOUT holding VBOXGUESTDEVEXT::SessionSpinlock.
3060 */
3061 fAcquireModeGuestCaps = ASMAtomicUoReadU32(&pDevExt->fAcquireModeGuestCaps);
3062 if (fAcquireModeGuestCaps == 0)
3063 return VMMDEV_EVENT_VALID_EVENT_MASK;
3064 fAcquiredGuestCaps = ASMAtomicUoReadU32(&pSession->fAcquiredGuestCaps);
3065
3066 /*
3067 * Calculate which events to allow according to the cap config and caps
3068 * acquired by the session.
3069 */
3070 fAllowedEvents = VMMDEV_EVENT_VALID_EVENT_MASK;
3071 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
3072 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS))
3073 fAllowedEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
3074
3075 if ( !(fAcquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3076 && (fAcquireModeGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS))
3077 fAllowedEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3078
3079 return fAllowedEvents;
3080}
3081
3082
3083/**
3084 * Init and termination worker for set guest capabilities to zero on the host.
3085 *
3086 * @returns VBox status code.
3087 * @param pDevExt The device extension.
3088 */
3089static int vgdrvResetCapabilitiesOnHost(PVBOXGUESTDEVEXT pDevExt)
3090{
3091 VMMDevReqGuestCapabilities2 *pReq;
3092 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3093 if (RT_SUCCESS(rc))
3094 {
3095 pReq->u32NotMask = UINT32_MAX;
3096 pReq->u32OrMask = 0;
3097 rc = VbglGRPerform(&pReq->header);
3098
3099 if (RT_FAILURE(rc))
3100 LogRelFunc(("failed with rc=%Rrc\n", rc));
3101 VbglGRFree(&pReq->header);
3102 }
3103 RT_NOREF1(pDevExt);
3104 return rc;
3105}
3106
3107
3108/**
3109 * Sets the guest capabilities to the host while holding the lock.
3110 *
3111 * This will ASSUME that we're the ones in charge of the mask, so
3112 * we'll simply clear all bits we don't set.
3113 *
3114 * @returns VBox status code.
3115 * @param pDevExt The device extension.
3116 * @param pReq The request.
3117 */
3118static int vgdrvUpdateCapabilitiesOnHostWithReqAndLock(PVBOXGUESTDEVEXT pDevExt, VMMDevReqGuestCapabilities2 *pReq)
3119{
3120 int rc;
3121
3122 pReq->u32OrMask = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;
3123 if (pReq->u32OrMask == pDevExt->fGuestCapsHost)
3124 rc = VINF_SUCCESS;
3125 else
3126 {
3127 pDevExt->fGuestCapsHost = pReq->u32OrMask;
3128 pReq->u32NotMask = ~pReq->u32OrMask;
3129 rc = VbglGRPerform(&pReq->header);
3130 if (RT_FAILURE(rc))
3131 pDevExt->fGuestCapsHost = UINT32_MAX;
3132 }
3133
3134 return rc;
3135}
3136
3137
3138/**
3139 * Switch a set of capabilities into "acquire" mode and (maybe) acquire them for
3140 * the given session.
3141 *
 * This is called in response to VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES as well as
3143 * to do session cleanup.
3144 *
3145 * @returns VBox status code.
3146 * @param pDevExt The device extension.
3147 * @param pSession The session.
 * @param fOrMask The capabilities to add.
3149 * @param fNotMask The capabilities to remove. Ignored in
3150 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
 * @param fFlags Confusing operation modifier.
3152 * VBOXGUESTCAPSACQUIRE_FLAGS_NONE means to both
3153 * configure and acquire/release the capabilities.
3154 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
3155 * means only configure capabilities in the
3156 * @a fOrMask capabilities for "acquire" mode.
3157 * @param fSessionTermination Set if we're called by the session cleanup code.
3158 * This tweaks the error handling so we perform
3159 * proper session cleanup even if the host
3160 * misbehaves.
3161 *
3162 * @remarks Takes both the session and event spinlocks.
3163 */
3164static int vgdrvAcquireSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
3165 uint32_t fOrMask, uint32_t fNotMask, uint32_t fFlags,
3166 bool fSessionTermination)
3167{
3168 uint32_t fCurrentOwnedCaps;
3169 uint32_t fSessionRemovedCaps;
3170 uint32_t fSessionAddedCaps;
3171 uint32_t fOtherConflictingCaps;
3172 VMMDevReqGuestCapabilities2 *pReq = NULL;
3173 int rc;
3174
3175
3176 /*
3177 * Validate and adjust input.
3178 */
3179 if (fOrMask & ~( VMMDEV_GUEST_SUPPORTS_SEAMLESS
3180 | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING
3181 | VMMDEV_GUEST_SUPPORTS_GRAPHICS ) )
3182 {
3183 LogRel(("vgdrvAcquireSessionCapabilities: invalid fOrMask=%#x (pSession=%p fNotMask=%#x fFlags=%#x)\n",
3184 fOrMask, pSession, fNotMask, fFlags));
3185 return VERR_INVALID_PARAMETER;
3186 }
3187
3188 if ((fFlags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) != 0)
3189 {
3190 LogRel(("vgdrvAcquireSessionCapabilities: invalid fFlags=%#x (pSession=%p fOrMask=%#x fNotMask=%#x)\n",
3191 fFlags, pSession, fOrMask, fNotMask));
3192 return VERR_INVALID_PARAMETER;
3193 }
3194 Assert(!fOrMask || !fSessionTermination);
3195
3196 /* The fNotMask no need to have all values valid, invalid ones will simply be ignored. */
3197 fNotMask &= ~fOrMask;
3198
3199 /*
3200 * Preallocate a update request if we're about to do more than just configure
3201 * the capability mode.
3202 */
3203 if (!(fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE))
3204 {
3205 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
3206 if (RT_SUCCESS(rc))
3207 { /* do nothing */ }
3208 else if (!fSessionTermination)
3209 {
3210 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: VbglGRAlloc failure: %Rrc\n",
3211 pSession, fOrMask, fNotMask, fFlags, rc));
3212 return rc;
3213 }
3214 else
3215 pReq = NULL; /* Ignore failure, we must do session cleanup. */
3216 }
3217
3218 /*
3219 * Try switch the capabilities in the OR mask into "acquire" mode.
3220 *
3221 * Note! We currently ignore anyone which may already have "set" the capabilities
3222 * in fOrMask. Perhaps not the best way to handle it, but it's simple...
3223 */
3224 RTSpinlockAcquire(pDevExt->EventSpinlock);
3225
3226 if (!(pDevExt->fSetModeGuestCaps & fOrMask))
3227 pDevExt->fAcquireModeGuestCaps |= fOrMask;
3228 else
3229 {
3230 RTSpinlockRelease(pDevExt->EventSpinlock);
3231
3232 if (pReq)
3233 VbglGRFree(&pReq->header);
3234 AssertMsgFailed(("Trying to change caps mode: %#x\n", fOrMask));
3235 LogRel(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: calling caps acquire for set caps\n",
3236 pSession, fOrMask, fNotMask, fFlags));
3237 return VERR_INVALID_STATE;
3238 }
3239
3240 /*
3241 * If we only wanted to switch the capabilities into "acquire" mode, we're done now.
3242 */
3243 if (fFlags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE)
3244 {
3245 RTSpinlockRelease(pDevExt->EventSpinlock);
3246
3247 Assert(!pReq);
3248 Log(("vgdrvAcquireSessionCapabilities: pSession=%p fOrMask=%#x fNotMask=%#x fFlags=%#x: configured acquire caps: 0x%x\n",
3249 pSession, fOrMask, fNotMask, fFlags));
3250 return VINF_SUCCESS;
3251 }
3252 Assert(pReq || fSessionTermination);
3253
3254 /*
3255 * Caller wants to acquire/release the capabilities too.
3256 *
3257 * Note! The mode change of the capabilities above won't be reverted on
3258 * failure, this is intentional.
3259 */
3260 fCurrentOwnedCaps = pSession->fAcquiredGuestCaps;
3261 fSessionRemovedCaps = fCurrentOwnedCaps & fNotMask;
3262 fSessionAddedCaps = fOrMask & ~fCurrentOwnedCaps;
3263 fOtherConflictingCaps = pDevExt->fAcquiredGuestCaps & ~fCurrentOwnedCaps;
3264 fOtherConflictingCaps &= fSessionAddedCaps;
3265
3266 if (!fOtherConflictingCaps)
3267 {
3268 if (fSessionAddedCaps)
3269 {
3270 pSession->fAcquiredGuestCaps |= fSessionAddedCaps;
3271 pDevExt->fAcquiredGuestCaps |= fSessionAddedCaps;
3272 }
3273
3274 if (fSessionRemovedCaps)
3275 {
3276 pSession->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3277 pDevExt->fAcquiredGuestCaps &= ~fSessionRemovedCaps;
3278 }
3279
3280 /*
3281 * If something changes (which is very likely), tell the host.
3282 */
3283 if (fSessionAddedCaps || fSessionRemovedCaps || pDevExt->fGuestCapsHost == UINT32_MAX)
3284 {
3285 Assert(pReq || fSessionTermination);
3286 if (pReq)
3287 {
3288 rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);
3289 if (RT_FAILURE(rc) && !fSessionTermination)
3290 {
3291 /* Failed, roll back. */
3292 if (fSessionAddedCaps)
3293 {
3294 pSession->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3295 pDevExt->fAcquiredGuestCaps &= ~fSessionAddedCaps;
3296 }
3297 if (fSessionRemovedCaps)
3298 {
3299 pSession->fAcquiredGuestCaps |= fSessionRemovedCaps;
3300 pDevExt->fAcquiredGuestCaps |= fSessionRemovedCaps;
3301 }
3302
3303 RTSpinlockRelease(pDevExt->EventSpinlock);
3304 LogRel(("vgdrvAcquireSessionCapabilities: vgdrvUpdateCapabilitiesOnHostWithReqAndLock failed: rc=%Rrc\n", rc));
3305 VbglGRFree(&pReq->header);
3306 return rc;
3307 }
3308 }
3309 }
3310 }
3311 else
3312 {
3313 RTSpinlockRelease(pDevExt->EventSpinlock);
3314
3315 Log(("vgdrvAcquireSessionCapabilities: Caps %#x were busy\n", fOtherConflictingCaps));
3316 VbglGRFree(&pReq->header);
3317 return VERR_RESOURCE_BUSY;
3318 }
3319
3320 RTSpinlockRelease(pDevExt->EventSpinlock);
3321 if (pReq)
3322 VbglGRFree(&pReq->header);
3323
3324 /*
3325 * If we added a capability, check if that means some other thread in our
3326 * session should be unblocked because there are events pending.
3327 *
3328 * HACK ALERT! When the seamless support capability is added we generate a
3329 * seamless change event so that the ring-3 client can sync with
3330 * the seamless state. Although this introduces a spurious
3331 * wakeups of the ring-3 client, it solves the problem of client
3332 * state inconsistency in multiuser environment (on Windows).
3333 */
3334 if (fSessionAddedCaps)
3335 {
3336 uint32_t fGenFakeEvents = 0;
3337 if (fSessionAddedCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
3338 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
3339
3340 RTSpinlockAcquire(pDevExt->EventSpinlock);
3341 if (fGenFakeEvents || pDevExt->f32PendingEvents)
3342 vgdrvDispatchEventsLocked(pDevExt, fGenFakeEvents);
3343 RTSpinlockRelease(pDevExt->EventSpinlock);
3344
3345#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3346 VGDrvCommonWaitDoWakeUps(pDevExt);
3347#endif
3348 }
3349
3350 return VINF_SUCCESS;
3351}
3352
3353
3354/**
3355 * Handle VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES.
3356 *
3357 * @returns VBox status code.
3358 *
3359 * @param pDevExt The device extension.
3360 * @param pSession The session.
3361 * @param pAcquire The request.
3362 */
3363static int vgdrvIoCtl_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCACQUIREGUESTCAPS pAcquire)
3364{
3365 int rc;
3366 LogFlow(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES: or=%#x not=%#x flags=%#x\n",
3367 pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask, pAcquire->u.In.fFlags));
3368
3369 rc = vgdrvAcquireSessionCapabilities(pDevExt, pSession, pAcquire->u.In.fOrMask, pAcquire->u.In.fNotMask,
3370 pAcquire->u.In.fFlags, false /*fSessionTermination*/);
3371 if (RT_FAILURE(rc))
3372 LogRel(("VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES failed rc=%Rrc\n", rc));
3373 return rc;
3374}
3375
3376
3377/**
3378 * Sets the guest capabilities for a session.
3379 *
3380 * @returns VBox status code.
3381 * @param pDevExt The device extension.
3382 * @param pSession The session.
3383 * @param fOrMask The capabilities to add.
3384 * @param fNotMask The capabilities to remove.
3385 * @param pfSessionCaps Where to return the guest capabilities reported
3386 * for this session. Optional.
3387 * @param pfGlobalCaps Where to return the guest capabilities reported
3388 * for all the sessions. Optional.
3389 *
3390 * @param fSessionTermination Set if we're called by the session cleanup code.
3391 * This tweaks the error handling so we perform
3392 * proper session cleanup even if the host
3393 * misbehaves.
3394 *
3395 * @remarks Takes the session spinlock.
3396 */
static int vgdrvSetSessionCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                       uint32_t fOrMask, uint32_t fNotMask, uint32_t *pfSessionCaps, uint32_t *pfGlobalCaps,
                                       bool fSessionTermination)
{
    /*
     * Preallocate a request buffer so we can do all in one go without leaving the spinlock.
     */
    VMMDevReqGuestCapabilities2 *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
    if (RT_SUCCESS(rc))
    { /* nothing */ }
    else if (!fSessionTermination)
    {
        /* Allocation failed outside cleanup: report "unknown" masks and bail. */
        if (pfSessionCaps)
            *pfSessionCaps = UINT32_MAX;
        if (pfGlobalCaps)
            *pfGlobalCaps = UINT32_MAX;
        LogRel(("vgdrvSetSessionCapabilities: VbglGRAlloc failure: %Rrc\n", rc));
        return rc;
    }
    else
        pReq = NULL; /* Ignore failure, we must do session cleanup. */


    RTSpinlockAcquire(pDevExt->SessionSpinlock);

#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    /*
     * Capabilities in "acquire" mode cannot be set via this API.
     * (Acquire mode is only used on windows at the time of writing.)
     */
    if (!(fOrMask & pDevExt->fAcquireModeGuestCaps))
#endif
    {
        /*
         * Apply the changes to the session mask.
         */
        uint32_t fChanged;
        uint32_t fPrevious = pSession->fCapabilities;
        pSession->fCapabilities |= fOrMask;
        pSession->fCapabilities &= ~fNotMask;

        /*
         * If anything actually changed, update the global usage counters.
         */
        fChanged = fPrevious ^ pSession->fCapabilities;
        if (fChanged)
        {
            bool fGlobalChange = vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, fPrevious,
                                                            pDevExt->cSessions, "SetGuestCapsTracker");

            /*
             * If there are global changes, update the capabilities on the host.
             * (fGuestCapsHost == UINT32_MAX is the "host state unknown" sentinel
             * and forces a resend even without a global change.)
             */
            if (fGlobalChange || pDevExt->fGuestCapsHost == UINT32_MAX)
            {
                Assert(pReq || fSessionTermination);
                if (pReq)
                {
                    rc = vgdrvUpdateCapabilitiesOnHostWithReqAndLock(pDevExt, pReq);

                    /* On failure, roll back (unless it's session termination time). */
                    if (RT_FAILURE(rc) && !fSessionTermination)
                    {
                        vgdrvBitUsageTrackerChange(&pDevExt->SetGuestCapsTracker, fChanged, pSession->fCapabilities,
                                                   pDevExt->cSessions, "SetGuestCapsTracker");
                        pSession->fCapabilities = fPrevious;
                    }
                }
            }
        }
    }
#ifndef VBOXGUEST_DISREGARD_ACQUIRE_MODE_GUEST_CAPS
    else
        rc = VERR_RESOURCE_BUSY;
#endif

    /* Return the current (possibly rolled back) masks if the caller asked for them. */
    if (pfSessionCaps)
        *pfSessionCaps = pSession->fCapabilities;
    if (pfGlobalCaps)
        *pfGlobalCaps = pDevExt->fAcquiredGuestCaps | pDevExt->SetGuestCapsTracker.fMask;

    RTSpinlockRelease(pDevExt->SessionSpinlock);
    if (pReq)
        VbglGRFree(&pReq->header);
    return rc;
}
3484
3485
3486/**
3487 * Handle VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES.
3488 *
3489 * @returns VBox status code.
3490 *
3491 * @param pDevExt The device extension.
3492 * @param pSession The session.
3493 * @param pInfo The request.
3494 */
3495static int vgdrvIoCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLIOCSETGUESTCAPS pInfo)
3496{
3497 int rc;
3498 LogFlow(("VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES: or=%#x not=%#x\n", pInfo->u.In.fOrMask, pInfo->u.In.fNotMask));
3499
3500 if (!((pInfo->u.In.fOrMask | pInfo->u.In.fNotMask) & ~VMMDEV_GUEST_CAPABILITIES_MASK))
3501 rc = vgdrvSetSessionCapabilities(pDevExt, pSession, pInfo->u.In.fOrMask, pInfo->u.In.fNotMask,
3502 &pInfo->u.Out.fSessionCaps, &pInfo->u.Out.fGlobalCaps, false /*fSessionTermination*/);
3503 else
3504 rc = VERR_INVALID_PARAMETER;
3505
3506 return rc;
3507}
3508
3509/** @} */
3510
3511
3512/**
3513 * Common IOCtl for user to kernel and kernel to kernel communication.
3514 *
3515 * This function only does the basic validation and then invokes
3516 * worker functions that takes care of each specific function.
3517 *
3518 * @returns VBox status code.
3519 *
3520 * @param iFunction The requested function.
3521 * @param pDevExt The device extension.
3522 * @param pSession The client session.
3523 * @param pReqHdr Pointer to the request. This always starts with
3524 * a request common header.
3525 * @param cbReq The max size of the request buffer.
3526 */
3527int VGDrvCommonIoCtl(uintptr_t iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, PVBGLREQHDR pReqHdr, size_t cbReq)
3528{
3529 uintptr_t const iFunctionStripped = VBGL_IOCTL_CODE_STRIPPED(iFunction);
3530 int rc;
3531
3532 LogFlow(("VGDrvCommonIoCtl: iFunction=%#x pDevExt=%p pSession=%p pReqHdr=%p cbReq=%zu\n",
3533 iFunction, pDevExt, pSession, pReqHdr, cbReq));
3534
3535 /*
3536 * Define some helper macros to simplify validation.
3537 */
3538#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
3539 do { \
3540 if (RT_LIKELY( pReqHdr->cbIn == (cbInExpect) \
3541 && ( pReqHdr->cbOut == (cbOutExpect) \
3542 || ((cbInExpect) == (cbOutExpect) && pReqHdr->cbOut == 0) ) )) \
3543 { /* likely */ } \
3544 else \
3545 { \
3546 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
3547 (long)pReqHdr->cbIn, (long)(cbInExpect), (long)pReqHdr->cbOut, (long)(cbOutExpect))); \
3548 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3549 } \
3550 } while (0)
3551
3552#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
3553
3554#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
3555 do { \
3556 if (RT_LIKELY(pReqHdr->cbIn == (cbInExpect))) \
3557 { /* likely */ } \
3558 else \
3559 { \
3560 Log(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
3561 (long)pReqHdr->cbIn, (long)(cbInExpect))); \
3562 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3563 } \
3564 } while (0)
3565
3566#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
3567 do { \
3568 if (RT_LIKELY( pReqHdr->cbOut == (cbOutExpect) \
3569 || (pReqHdr->cbOut == 0 && pReqHdr->cbIn == (cbOutExpect)))) \
3570 { /* likely */ } \
3571 else \
3572 { \
3573 Log(( #Name ": Invalid input/output sizes. cbOut=%ld (%ld) expected %ld.\n", \
3574 (long)pReqHdr->cbOut, (long)pReqHdr->cbIn, (long)(cbOutExpect))); \
3575 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3576 } \
3577 } while (0)
3578
3579#define REQ_CHECK_EXPR(Name, expr) \
3580 do { \
3581 if (RT_LIKELY(!!(expr))) \
3582 { /* likely */ } \
3583 else \
3584 { \
3585 Log(( #Name ": %s\n", #expr)); \
3586 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3587 } \
3588 } while (0)
3589
3590#define REQ_CHECK_EXPR_FMT(expr, fmt) \
3591 do { \
3592 if (RT_LIKELY(!!(expr))) \
3593 { /* likely */ } \
3594 else \
3595 { \
3596 Log( fmt ); \
3597 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
3598 } \
3599 } while (0)
3600
3601#define REQ_CHECK_RING0(mnemonic) \
3602 do { \
3603 if (pSession->R0Process != NIL_RTR0PROCESS) \
3604 { \
3605 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
3606 pSession->Process, (uintptr_t)pSession->R0Process)); \
3607 return pReqHdr->rc = VERR_PERMISSION_DENIED; \
3608 } \
3609 } while (0)
3610
3611
3612 /*
3613 * Validate the request.
3614 */
3615 if (RT_LIKELY(cbReq >= sizeof(*pReqHdr)))
3616 { /* likely */ }
3617 else
3618 {
3619 Log(("VGDrvCommonIoCtl: Bad ioctl request size; cbReq=%#lx\n", (long)cbReq));
3620 return VERR_INVALID_PARAMETER;
3621 }
3622
3623 if (pReqHdr->cbOut == 0)
3624 pReqHdr->cbOut = pReqHdr->cbIn;
3625
3626 if (RT_LIKELY( pReqHdr->uVersion == VBGLREQHDR_VERSION
3627 && pReqHdr->cbIn >= sizeof(*pReqHdr)
3628 && pReqHdr->cbIn <= cbReq
3629 && pReqHdr->cbOut >= sizeof(*pReqHdr)
3630 && pReqHdr->cbOut <= cbReq))
3631 { /* likely */ }
3632 else
3633 {
3634 Log(("VGDrvCommonIoCtl: Bad ioctl request header; cbIn=%#lx cbOut=%#lx version=%#lx\n",
3635 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->uVersion));
3636 return VERR_INVALID_PARAMETER;
3637 }
3638
3639 if (RT_LIKELY(RT_VALID_PTR(pSession)))
3640 { /* likely */ }
3641 else
3642 {
3643 Log(("VGDrvCommonIoCtl: Invalid pSession value %p (ioctl=%#x)\n", pSession, iFunction));
3644 return VERR_INVALID_PARAMETER;
3645 }
3646
3647
3648 /*
3649 * Deal with variably sized requests first.
3650 */
3651 rc = VINF_SUCCESS;
3652 if ( iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST(0))
3653 || iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_VMMDEV_REQUEST_BIG) )
3654 {
3655 REQ_CHECK_EXPR(VBGL_IOCTL_VMMDEV_REQUEST, pReqHdr->uType != VBGLREQHDR_TYPE_DEFAULT);
3656 REQ_CHECK_EXPR_FMT(pReqHdr->cbIn == pReqHdr->cbOut,
3657 ("VBGL_IOCTL_VMMDEV_REQUEST: cbIn=%ld != cbOut=%ld\n", (long)pReqHdr->cbIn, (long)pReqHdr->cbOut));
3658 pReqHdr->rc = vgdrvIoCtl_VMMDevRequest(pDevExt, pSession, (VMMDevRequestHeader *)pReqHdr, cbReq);
3659 }
3660 else if (RT_LIKELY(pReqHdr->uType == VBGLREQHDR_TYPE_DEFAULT))
3661 {
3662 if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_LOG(0)))
3663 {
3664 REQ_CHECK_SIZE_OUT(VBGL_IOCTL_LOG, VBGL_IOCTL_LOG_SIZE_OUT);
3665 pReqHdr->rc = vgdrvIoCtl_Log(pDevExt, &((PVBGLIOCLOG)pReqHdr)->u.In.szMsg[0], pReqHdr->cbIn - sizeof(VBGLREQHDR),
3666 pSession->fUserSession);
3667 }
3668#ifdef VBOX_WITH_HGCM
3669 else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL(0)))
3670 {
3671 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
3672 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
3673 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
3674 !VBGL_IOCTL_IS_64BIT(iFunction), false /*fUserData*/, cbReq);
3675 }
3676 else if (iFunctionStripped == VBGL_IOCTL_CODE_STRIPPED(VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA(0)))
3677 {
3678 REQ_CHECK_RING0("VBGL_IOCTL_HGCM_CALL_WITH_USER_DATA");
3679 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn >= sizeof(VBGLIOCHGCMCALL));
3680 REQ_CHECK_EXPR(VBGL_IOCTL_HGCM_CALL, pReqHdr->cbIn == pReqHdr->cbOut);
3681 pReqHdr->rc = vgdrvIoCtl_HGCMCallWrapper(pDevExt, pSession, (PVBGLIOCHGCMCALL)pReqHdr,
3682 !VBGL_IOCTL_IS_64BIT(iFunction), true /*fUserData*/, cbReq);
3683 }
3684#endif /* VBOX_WITH_HGCM */
3685 else
3686 {
3687 switch (iFunction)
3688 {
3689 /*
3690 * Ring-0 only:
3691 */
3692 case VBGL_IOCTL_IDC_CONNECT:
3693 REQ_CHECK_RING0("VBGL_IOCL_IDC_CONNECT");
3694 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_CONNECT);
3695 pReqHdr->rc = vgdrvIoCtl_IdcConnect(pDevExt, pSession, (PVBGLIOCIDCCONNECT)pReqHdr);
3696 break;
3697
3698 case VBGL_IOCTL_IDC_DISCONNECT:
3699 REQ_CHECK_RING0("VBGL_IOCTL_IDC_DISCONNECT");
3700 REQ_CHECK_SIZES(VBGL_IOCTL_IDC_DISCONNECT);
3701 pReqHdr->rc = vgdrvIoCtl_IdcDisconnect(pDevExt, pSession, (PVBGLIOCIDCDISCONNECT)pReqHdr);
3702 break;
3703
3704 case VBGL_IOCTL_GET_VMMDEV_IO_INFO:
3705 REQ_CHECK_RING0("GET_VMMDEV_IO_INFO");
3706 REQ_CHECK_SIZES(VBGL_IOCTL_GET_VMMDEV_IO_INFO);
3707 pReqHdr->rc = vgdrvIoCtl_GetVMMDevIoInfo(pDevExt, (PVBGLIOCGETVMMDEVIOINFO)pReqHdr);
3708 break;
3709
3710 case VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
3711 REQ_CHECK_RING0("SET_MOUSE_NOTIFY_CALLBACK");
3712 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_NOTIFY_CALLBACK);
3713 pReqHdr->rc = vgdrvIoCtl_SetMouseNotifyCallback(pDevExt, (PVBGLIOCSETMOUSENOTIFYCALLBACK)pReqHdr);
3714 break;
3715
3716 /*
3717 * Ring-3 only:
3718 */
3719 case VBGL_IOCTL_DRIVER_VERSION_INFO:
3720 REQ_CHECK_SIZES(VBGL_IOCTL_DRIVER_VERSION_INFO);
3721 pReqHdr->rc = vgdrvIoCtl_DriverVersionInfo(pDevExt, pSession, (PVBGLIOCDRIVERVERSIONINFO)pReqHdr);
3722 break;
3723
3724 /*
3725 * Both ring-3 and ring-0:
3726 */
3727 case VBGL_IOCTL_WAIT_FOR_EVENTS:
3728 REQ_CHECK_SIZES(VBGL_IOCTL_WAIT_FOR_EVENTS);
3729 pReqHdr->rc = vgdrvIoCtl_WaitForEvents(pDevExt, pSession, (VBGLIOCWAITFOREVENTS *)pReqHdr,
3730 pSession->R0Process != NIL_RTR0PROCESS);
3731 break;
3732
3733 case VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
3734 REQ_CHECK_SIZES(VBGL_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS);
3735 pReqHdr->rc = vgdrvIoCtl_CancelAllWaitEvents(pDevExt, pSession);
3736 break;
3737
3738 case VBGL_IOCTL_CHANGE_FILTER_MASK:
3739 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_FILTER_MASK);
3740 pReqHdr->rc = vgdrvIoCtl_ChangeFilterMask(pDevExt, pSession, (PVBGLIOCCHANGEFILTERMASK)pReqHdr);
3741 break;
3742
3743#ifdef VBOX_WITH_HGCM
3744 case VBGL_IOCTL_HGCM_CONNECT:
3745 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_CONNECT);
3746 pReqHdr->rc = vgdrvIoCtl_HGCMConnect(pDevExt, pSession, (PVBGLIOCHGCMCONNECT)pReqHdr);
3747 break;
3748
3749 case VBGL_IOCTL_HGCM_DISCONNECT:
3750 REQ_CHECK_SIZES(VBGL_IOCTL_HGCM_DISCONNECT);
3751 pReqHdr->rc = vgdrvIoCtl_HGCMDisconnect(pDevExt, pSession, (PVBGLIOCHGCMDISCONNECT)pReqHdr);
3752 break;
3753#endif
3754
3755 case VBGL_IOCTL_CHECK_BALLOON:
3756 REQ_CHECK_SIZES(VBGL_IOCTL_CHECK_BALLOON);
3757 pReqHdr->rc = vgdrvIoCtl_CheckMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHECKBALLOON)pReqHdr);
3758 break;
3759
3760 case VBGL_IOCTL_CHANGE_BALLOON:
3761 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_BALLOON);
3762 pReqHdr->rc = vgdrvIoCtl_ChangeMemoryBalloon(pDevExt, pSession, (PVBGLIOCCHANGEBALLOON)pReqHdr);
3763 break;
3764
3765 case VBGL_IOCTL_WRITE_CORE_DUMP:
3766 REQ_CHECK_SIZES(VBGL_IOCTL_WRITE_CORE_DUMP);
3767 pReqHdr->rc = vgdrvIoCtl_WriteCoreDump(pDevExt, (PVBGLIOCWRITECOREDUMP)pReqHdr);
3768 break;
3769
3770 case VBGL_IOCTL_SET_MOUSE_STATUS:
3771 REQ_CHECK_SIZES(VBGL_IOCTL_SET_MOUSE_STATUS);
3772 pReqHdr->rc = vgdrvIoCtl_SetMouseStatus(pDevExt, pSession, ((PVBGLIOCSETMOUSESTATUS)pReqHdr)->u.In.fStatus);
3773 break;
3774
3775 case VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES:
3776 REQ_CHECK_SIZES(VBGL_IOCTL_ACQUIRE_GUEST_CAPABILITIES);
3777 pReqHdr->rc = vgdrvIoCtl_GuestCapsAcquire(pDevExt, pSession, (PVBGLIOCACQUIREGUESTCAPS)pReqHdr);
3778 break;
3779
3780 case VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES:
3781 REQ_CHECK_SIZES(VBGL_IOCTL_CHANGE_GUEST_CAPABILITIES);
3782 pReqHdr->rc = vgdrvIoCtl_SetCapabilities(pDevExt, pSession, (PVBGLIOCSETGUESTCAPS)pReqHdr);
3783 break;
3784
3785#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
3786 case VBGL_IOCTL_DPC_LATENCY_CHECKER:
3787 REQ_CHECK_SIZES(VBGL_IOCTL_DPC_LATENCY_CHECKER);
3788 pReqHdr->rc = VGDrvNtIOCtl_DpcLatencyChecker();
3789 break;
3790#endif
3791
3792 default:
3793 {
3794 LogRel(("VGDrvCommonIoCtl: Unknown request iFunction=%#x (stripped %#x) cbReq=%#x\n",
3795 iFunction, iFunctionStripped, cbReq));
3796 pReqHdr->rc = rc = VERR_NOT_SUPPORTED;
3797 break;
3798 }
3799 }
3800 }
3801 }
3802 else
3803 {
3804 Log(("VGDrvCommonIoCtl: uType=%#x, expected default (ioctl=%#x)\n", pReqHdr->uType, iFunction));
3805 return VERR_INVALID_PARAMETER;
3806 }
3807
3808 LogFlow(("VGDrvCommonIoCtl: returns %Rrc (req: rc=%Rrc cbOut=%#x)\n", rc, pReqHdr->rc, pReqHdr->cbOut));
3809 return rc;
3810}
3811
3812
3813/**
3814 * Used by VGDrvCommonISR as well as the acquire guest capability code.
3815 *
3816 * @returns VINF_SUCCESS on success. On failure, ORed together
3817 * RTSemEventMultiSignal errors (completes processing despite errors).
3818 * @param pDevExt The VBoxGuest device extension.
3819 * @param fEvents The events to dispatch.
3820 */
3821static int vgdrvDispatchEventsLocked(PVBOXGUESTDEVEXT pDevExt, uint32_t fEvents)
3822{
3823 PVBOXGUESTWAIT pWait;
3824 PVBOXGUESTWAIT pSafe;
3825 int rc = VINF_SUCCESS;
3826
3827 fEvents |= pDevExt->f32PendingEvents;
3828
3829 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
3830 {
3831 uint32_t fHandledEvents = pWait->fReqEvents & fEvents;
3832 if ( fHandledEvents != 0
3833 && !pWait->fResEvents)
3834 {
3835 /* Does this one wait on any of the events we're dispatching? We do a quick
3836 check first, then deal with VBOXGUEST_ACQUIRE_STYLE_EVENTS as applicable. */
3837 if (fHandledEvents & VBOXGUEST_ACQUIRE_STYLE_EVENTS)
3838 fHandledEvents &= vgdrvGetAllowedEventMaskForSession(pDevExt, pWait->pSession);
3839 if (fHandledEvents)
3840 {
3841 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
3842 fEvents &= ~pWait->fResEvents;
3843 RTListNodeRemove(&pWait->ListNode);
3844#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
3845 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
3846#else
3847 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
3848 rc |= RTSemEventMultiSignal(pWait->Event);
3849#endif
3850 if (!fEvents)
3851 break;
3852 }
3853 }
3854 }
3855
3856 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
3857 return rc;
3858}
3859
3860
3861/**
3862 * Simply checks whether the IRQ is ours or not, does not do any interrupt
3863 * procesing.
3864 *
3865 * @returns true if it was our interrupt, false if it wasn't.
3866 * @param pDevExt The VBoxGuest device extension.
3867 */
3868bool VGDrvCommonIsOurIRQ(PVBOXGUESTDEVEXT pDevExt)
3869{
3870 RTSpinlockAcquire(pDevExt->EventSpinlock);
3871 bool const fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
3872 RTSpinlockRelease(pDevExt->EventSpinlock);
3873
3874 return fOurIrq;
3875}
3876
3877
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * If the IRQ is ours, pending events are acknowledged through the VMMDev
 * request port and then handed out to matching waiters.  Mouse position
 * changes and (on some OSes) the actual waiter wake-ups happen outside the
 * spinlock or are deferred to OS specific callbacks — see the #ifdefs below.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VGDrvCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    bool fMousePositionChanged = false;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     * (No acknowledge request allocated yet means we cannot possibly own the IRQ.)
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         * Instead the pre-allocated acknowledge request is handed to the host
         * directly via the VMMDev request port; the host completes it in
         * place, filling in the status code and the pending event mask.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;

            Log3(("VGDrvCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for, so it is
             * stripped from the mask before the waiter dispatch further down.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#if !defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT)
                /* On these OSes the callback may be invoked while holding the
                   interrupt safe spinlock; otherwise it runs after the release
                   below (see @bugref{8639}). */
                if (pDevExt->MouseNotifyCallback.pfnNotify)
                    pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                PVBOXGUESTWAIT pWait;
                PVBOXGUESTWAIT pSafe;
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    /* Only waiters whose request the host has flagged as done get woken. */
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            rc |= vgdrvDispatchEventsLocked(pDevExt, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VGDrvCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        Log3(("VGDrvCommonISR: not ours\n"));

    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Execute the mouse notification callback here if it cannot be executed while
     * holding the interrupt safe spinlock, see @bugref{8639}.
     */
#if defined(VBOXGUEST_MOUSE_NOTIFY_CAN_PREEMPT) && !defined(RT_OS_WINDOWS) /* (Windows does this in the Dpc callback) */
    if (   fMousePositionChanged
        && pDevExt->MouseNotifyCallback.pfnNotify)
        pDevExt->MouseNotifyCallback.pfnNotify(pDevExt->MouseNotifyCallback.pvUser);
#endif

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     * care of it.  Same on darwin, doing it in the work loop callback.
     */
    VGDrvCommonWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VGDrvNativeISRMousePollEvent(pDevExt);
    }

    /* rc accumulates RTSemEventMultiSignal failures from the wake-up paths;
       none are expected. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
4007
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette