VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 50722

Last change on this file since 50722 was 50688, checked in by vboxsync, 11 years ago

VBoxGuest/darwin: Delay waking up threads until we're out of interrupt handler context, or we'll trip up in the event semaphore wake-up code.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 111.7 KB
Line 
1/* $Id: VBoxGuest.cpp 50688 2014-03-04 23:11:39Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68
69static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags);
70
71#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
72
73/** Return the mask of VMM device events that this session is allowed to see,
74 * ergo, all events except those in "acquire" mode which have not been acquired
75 * by this session. */
76DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
77{
78 if (!pDevExt->u32AcquireModeGuestCaps)
79 return VMMDEV_EVENT_VALID_EVENT_MASK;
80
81 /** @note VMMDEV_EVENT_VALID_EVENT_MASK should actually be the mask of valid
82 * capabilities, but that doesn't affect this code. */
83 uint32_t u32AllowedGuestCaps = pSession->u32AquiredGuestCaps | (VMMDEV_EVENT_VALID_EVENT_MASK & ~pDevExt->u32AcquireModeGuestCaps);
84 uint32_t u32CleanupEvents = VBOXGUEST_ACQUIRE_STYLE_EVENTS;
85 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
86 u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
87 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
88 u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
89
90 return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
91}
92
93DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
94{
95 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
96 if (fMatches)
97 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
98 return fMatches;
99}
100
101/** Puts a capability in "acquire" or "set" mode and returns the mask of
102 * capabilities currently in the other mode. Once a capability has been put in
103 * one of the two modes it can no longer be removed from that mode. */
104DECLINLINE(bool) VBoxGuestCommonGuestCapsModeSet(PVBOXGUESTDEVEXT pDevExt, uint32_t fCaps, bool fAcquire, uint32_t *pu32OtherVal)
105{
106 uint32_t *pVal = fAcquire ? &pDevExt->u32AcquireModeGuestCaps : &pDevExt->u32SetModeGuestCaps;
107 const uint32_t fNotVal = !fAcquire ? pDevExt->u32AcquireModeGuestCaps : pDevExt->u32SetModeGuestCaps;
108 bool fResult = true;
109 RTSpinlockAcquire(pDevExt->EventSpinlock);
110
111 if (!(fNotVal & fCaps))
112 *pVal |= fCaps;
113 else
114 {
115 AssertMsgFailed(("trying to change caps mode\n"));
116 fResult = false;
117 }
118
119 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
120
121 if (pu32OtherVal)
122 *pu32OtherVal = fNotVal;
123 return fResult;
124}
125
126
/**
 * Sets the interrupt filter mask during initialization and termination.
 *
 * This will ASSUME that we're the ones in charge of the mask, so
 * we'll simply clear all bits we don't set.
 *
 * @returns VBox status code (ignored).
 * @param   pReq    Pre-allocated VMMDev request buffer used for the call.
 * @param   fMask   The new mask.
 */
static int vboxGuestSetFilterMask(VMMDevCtlGuestFilterMask *pReq,
                                  uint32_t fMask)
{
    int rc;

    /* OR in exactly the wanted bits and clear everything else via the
       complemented NOT-mask, so the host ends up with precisely fMask. */
    pReq->u32OrMask = fMask;
    pReq->u32NotMask = ~fMask;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc))
        LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
    return rc;
}
148
149
150/**
151 * Sets the guest capabilities to the host.
152 *
153 * This will ASSUME that we're the ones in charge of the mask, so
154 * we'll simply clear all bits we don't set.
155 *
156 * @returns VBox status code.
157 * @param fMask The new mask.
158 */
159static int vboxGuestSetCapabilities(VMMDevReqGuestCapabilities2 *pReq,
160 uint32_t fMask)
161{
162 int rc;
163
164 pReq->u32OrMask = fMask;
165 pReq->u32NotMask = ~fMask;
166 rc = VbglGRPerform(&pReq->header);
167 if (RT_FAILURE(rc))
168 LogRelFunc(("failed with rc=%Rrc\n", rc));
169 return rc;
170}
171
172
173/**
174 * Sets the mouse status to the host.
175 *
176 * This will ASSUME that we're the ones in charge of the mask, so
177 * we'll simply clear all bits we don't set.
178 *
179 * @returns VBox status code.
180 * @param fMask The new mask.
181 */
182static int vboxGuestSetMouseStatus(VMMDevReqMouseStatus *pReq, uint32_t fMask)
183{
184 int rc;
185
186 pReq->mouseFeatures = fMask;
187 pReq->pointerXPos = 0;
188 pReq->pointerYPos = 0;
189 rc = VbglGRPerform(&pReq->header);
190 if (RT_FAILURE(rc))
191 LogRelFunc(("failed with rc=%Rrc\n", rc));
192 return rc;
193}
194
195
/** Host flags to be updated by a given invocation of the
 * vboxGuestUpdateHostFlags() method.  These are bit flags and may be ORed
 * together. */
enum
{
    HostFlags_FilterMask   = 1 << 0,
    HostFlags_Capabilities = 1 << 1,
    HostFlags_MouseStatus  = 1 << 2,
    HostFlags_All          = HostFlags_FilterMask | HostFlags_Capabilities | HostFlags_MouseStatus,
    HostFlags_SizeHack     = (unsigned)-1 /* Forces a full-width integer type for the enum. */
};
206
207
/**
 * Aggregates the host flags (event filter mask, guest capabilities and mouse
 * status) requested by all open sessions, with consistency sanity checks.
 *
 * NOTE(review): the only visible caller (vboxGuestUpdateHostFlags) holds the
 * session spinlock around this call -- confirm all callers do.
 *
 * @returns VINF_SUCCESS, or VERR_INTERNAL_ERROR if the accumulated flags are
 *          inconsistent with the session count (sanity checks only).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session; used only for the sanity
 *                          checks below and may be NULL.
 * @param   pfFilterMask    Where to return the union of all session filter masks.
 * @param   pfCapabilities  Where to return the union of all session capabilities.
 * @param   pfMouseStatus   Where to return the union of all session mouse flags.
 */
static int vboxGuestGetHostFlagsFromSessions(PVBOXGUESTDEVEXT pDevExt,
                                             PVBOXGUESTSESSION pSession,
                                             uint32_t *pfFilterMask,
                                             uint32_t *pfCapabilities,
                                             uint32_t *pfMouseStatus)
{
    PVBOXGUESTSESSION pIterator;
    uint32_t fFilterMask = 0, fCapabilities = 0, fMouseStatus = 0;
    unsigned cSessions = 0;
    int rc = VINF_SUCCESS;

    /* Accumulate the union of the flags asserted by every open session. */
    RTListForEach(&pDevExt->SessionList, pIterator, VBOXGUESTSESSION, ListNode)
    {
        fFilterMask |= pIterator->fFilterMask;
        fCapabilities |= pIterator->fCapabilities;
        fMouseStatus |= pIterator->fMouseStatus;
        ++cSessions;
    }

    /* Sanity: no sessions implies no flags can be asserted. */
    if (!cSessions)
        if (fFilterMask | fCapabilities | fMouseStatus)
            rc = VERR_INTERNAL_ERROR;
    /* Sanity: with exactly one session, the union must equal that session's
       own flags. */
    if (cSessions == 1 && pSession)
        if (   fFilterMask != pSession->fFilterMask
            || fCapabilities != pSession->fCapabilities
            || fMouseStatus != pSession->fMouseStatus)
            rc = VERR_INTERNAL_ERROR;
    /* Sanity: with several sessions, the caller's flags must be a subset of
       the union. */
    if (cSessions > 1 && pSession)
        if (   ~fFilterMask & pSession->fFilterMask
            || ~fCapabilities & pSession->fCapabilities
            || ~fMouseStatus & pSession->fMouseStatus)
            rc = VERR_INTERNAL_ERROR;

    *pfFilterMask = fFilterMask;
    *pfCapabilities = fCapabilities;
    *pfMouseStatus = fMouseStatus;
    return rc;
}
244
245
/** Check which host flags in a given category are being asserted by some guest
 * session and assert exactly those on the host which are being asserted by one
 * or more sessions.  pCallingSession is purely for sanity checking and can be
 * NULL.
 * @note Takes the session spin-lock.
 */
static int vboxGuestUpdateHostFlags(PVBOXGUESTDEVEXT pDevExt,
                                    PVBOXGUESTSESSION pSession,
                                    unsigned enmFlags)
{
    int rc;
    VMMDevCtlGuestFilterMask *pFilterReq = NULL;
    VMMDevReqGuestCapabilities2 *pCapabilitiesReq = NULL;
    VMMDevReqMouseStatus *pStatusReq = NULL;
    uint32_t fFilterMask = 0, fCapabilities = 0, fMouseStatus = 0;

    /* Pre-allocate all three request buffers before taking the spinlock.
       NOTE(review): presumably VbglGRAlloc must not be called while the
       spinlock is held -- confirm before restructuring this. */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pFilterReq, sizeof(*pFilterReq),
                     VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pCapabilitiesReq,
                         sizeof(*pCapabilitiesReq),
                         VMMDevReq_SetGuestCapabilities);
    if (RT_SUCCESS(rc))
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pStatusReq,
                         sizeof(*pStatusReq), VMMDevReq_SetMouseStatus);
    /* The spinlock is taken unconditionally; on allocation failure the code
       below simply skips the update and falls through to the cleanup. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (RT_SUCCESS(rc))
        rc = vboxGuestGetHostFlagsFromSessions(pDevExt, pSession, &fFilterMask,
                                               &fCapabilities, &fMouseStatus);
    if (RT_SUCCESS(rc))
    {
        /* Events the driver itself always wants are ORed in on top of the
           per-session union. */
        fFilterMask |= pDevExt->fFixedEvents;
        /* Since VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR is inverted in the session
         * capabilities we invert it again before sending it to the host. */
        fMouseStatus ^= VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
        if (enmFlags & HostFlags_FilterMask)
            vboxGuestSetFilterMask(pFilterReq, fFilterMask);
        if (enmFlags & HostFlags_Capabilities)
            vboxGuestSetCapabilities(pCapabilitiesReq, fCapabilities);
        if (enmFlags & HostFlags_MouseStatus)
            vboxGuestSetMouseStatus(pStatusReq, fMouseStatus);
    }
    RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
    /* Free whichever request buffers were successfully allocated. */
    if (pFilterReq)
        VbglGRFree(&pFilterReq->header);
    if (pCapabilitiesReq)
        VbglGRFree(&pCapabilitiesReq->header);
    if (pStatusReq)
        VbglGRFree(&pStatusReq->header);
    return rc;
}
297
298
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including room for the physical
 *  addresses of every page in one balloon chunk. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
303
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 *
 * Taking the address of each function forces the linker to pull it in;
 * the table itself is never read.  NULL terminated.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
325
326
/**
 * Reserves memory in which the VMM can relocate any guest mappings
 * that are floating around.
 *
 * This operation is a little bit tricky since the VMM might not accept
 * just any address because of address clashes between the three contexts
 * it operates in, so we use a small stack of attempts to perform this
 * operation.
 *
 * @returns VBox status code (ignored).
 * @param   pDevExt     The device extension.
 */
static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    /*
     * Query the required space.
     */
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
    if (RT_FAILURE(rc))
        return rc;
    pReq->hypervisorStart = 0;
    pReq->hypervisorSize  = 0;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc)) /* this shouldn't happen! */
    {
        VbglGRFree(&pReq->header);
        return rc;
    }

    /*
     * The VMM will report back if there is nothing it wants to map, like for
     * instance in VT-x and AMD-V mode.
     */
    if (pReq->hypervisorSize == 0)
        Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
    else
    {
        /*
         * We have to try several times since the host can be picky
         * about certain addresses.
         */
        RTR0MEMOBJ  hFictive     = NIL_RTR0MEMOBJ;
        uint32_t    cbHypervisor = pReq->hypervisorSize;
        RTR0MEMOBJ  ahTries[5];
        uint32_t    iTry;
        bool        fBitched = false;
        Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
        for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
        {
            /*
             * Reserve space, or if that isn't supported, create a object for
             * some fictive physical memory and map that in to kernel space.
             *
             * To make the code a bit uglier, most systems cannot help with
             * 4MB alignment, so we have to deal with that in addition to
             * having two ways of getting the memory.
             */
            uint32_t    uAlignment = _4M;
            RTR0MEMOBJ  hObj;
            rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
            if (rc == VERR_NOT_SUPPORTED)
            {
                /* 4MB alignment not available: over-reserve by 4MB so the
                   start address can be rounded up to a 4MB boundary below. */
                uAlignment = PAGE_SIZE;
                rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
            }
            /*
             * If both RTR0MemObjReserveKernel calls above failed because either not supported or
             * not implemented at all at the current platform, try to map the memory object into the
             * virtual kernel space.
             */
            if (rc == VERR_NOT_SUPPORTED)
            {
                if (hFictive == NIL_RTR0MEMOBJ)
                {
                    /* Create the fictive physical backing only once; it is
                       reused by any later retries. */
                    rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
                    if (RT_FAILURE(rc))
                        break;
                    hFictive = hObj;
                }
                uAlignment = _4M;
                rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                if (rc == VERR_NOT_SUPPORTED)
                {
                    uAlignment = PAGE_SIZE;
                    rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
                }
            }
            if (RT_FAILURE(rc))
            {
                LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
                        rc, cbHypervisor, uAlignment, iTry));
                fBitched = true;
                break;
            }

            /*
             * Try set it.
             */
            pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
            pReq->header.rc          = VERR_INTERNAL_ERROR;
            pReq->hypervisorSize     = cbHypervisor;
            pReq->hypervisorStart    = (uintptr_t)RTR0MemObjAddress(hObj);
            if (   uAlignment == PAGE_SIZE
                && pReq->hypervisorStart & (_4M - 1))
                pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
            AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));

            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* When mapping fictive memory, keep the backing object, not
                   the per-try mapping handle. */
                pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
                Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
                     RTR0MemObjAddress(pDevExt->hGuestMappings),
                     RTR0MemObjSize(pDevExt->hGuestMappings),
                     uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
                break;
            }
            /* The host rejected this address; remember the object for cleanup
               and try again. */
            ahTries[iTry] = hObj;
        }

        /*
         * Cleanup failed attempts.
         */
        while (iTry-- > 0)
            RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
        if (   RT_FAILURE(rc)
            && hFictive != NIL_RTR0PTR)
            RTR0MemObjFree(hFictive, false /* fFreeMappings */);
        if (RT_FAILURE(rc) && !fBitched)
            LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
    }
    VbglGRFree(&pReq->header);

    /*
     * We ignore failed attempts for now.
     */
    return VINF_SUCCESS;
}
465
466
/**
 * Undo what vboxGuestInitFixateGuestMappings did.
 *
 * @param   pDevExt     The device extension.
 */
static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    if (pDevExt->hGuestMappings != NIL_RTR0PTR)
    {
        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
        VMMDevReqHypervisorInfo *pReq;
        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
        if (RT_SUCCESS(rc))
        {
            /* Zero start and size tells the host to drop the mapping. */
            pReq->hypervisorStart = 0;
            pReq->hypervisorSize  = 0;
            rc = VbglGRPerform(&pReq->header);
            VbglGRFree(&pReq->header);
        }
        if (RT_SUCCESS(rc))
        {
            rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
            AssertRC(rc);
        }
        else
            LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));

        /* The handle is cleared even on failure (deliberate leak, see above). */
        pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    }
}
500
501
502/**
503 * Inflate the balloon by one chunk represented by an R0 memory object.
504 *
505 * The caller owns the balloon mutex.
506 *
507 * @returns IPRT status code.
508 * @param pMemObj Pointer to the R0 memory object.
509 * @param pReq The pre-allocated request for performing the VMMDev call.
510 */
511static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
512{
513 uint32_t iPage;
514 int rc;
515
516 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
517 {
518 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
519 pReq->aPhysPage[iPage] = phys;
520 }
521
522 pReq->fInflate = true;
523 pReq->header.size = cbChangeMemBalloonReq;
524 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
525
526 rc = VbglGRPerform(&pReq->header);
527 if (RT_FAILURE(rc))
528 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
529 return rc;
530}
531
532
533/**
534 * Deflate the balloon by one chunk - info the host and free the memory object.
535 *
536 * The caller owns the balloon mutex.
537 *
538 * @returns IPRT status code.
539 * @param pMemObj Pointer to the R0 memory object.
540 * The memory object will be freed afterwards.
541 * @param pReq The pre-allocated request for performing the VMMDev call.
542 */
543static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
544{
545 uint32_t iPage;
546 int rc;
547
548 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
549 {
550 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
551 pReq->aPhysPage[iPage] = phys;
552 }
553
554 pReq->fInflate = false;
555 pReq->header.size = cbChangeMemBalloonReq;
556 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
557
558 rc = VbglGRPerform(&pReq->header);
559 if (RT_FAILURE(rc))
560 {
561 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
562 return rc;
563 }
564
565 rc = RTR0MemObjFree(*pMemObj, true);
566 if (RT_FAILURE(rc))
567 {
568 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
569 return rc;
570 }
571
572 *pMemObj = NIL_RTR0MEMOBJ;
573 return VINF_SUCCESS;
574}
575
576
577/**
578 * Inflate/deflate the memory balloon and notify the host.
579 *
580 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
581 * the mutex.
582 *
583 * @returns VBox status code.
584 * @param pDevExt The device extension.
585 * @param pSession The session.
586 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
587 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
588 * (VINF_SUCCESS if set).
589 */
590static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
591{
592 int rc = VINF_SUCCESS;
593
594 if (pDevExt->MemBalloon.fUseKernelAPI)
595 {
596 VMMDevChangeMemBalloon *pReq;
597 uint32_t i;
598
599 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
600 {
601 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
602 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
603 return VERR_INVALID_PARAMETER;
604 }
605
606 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
607 return VINF_SUCCESS; /* nothing to do */
608
609 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
610 && !pDevExt->MemBalloon.paMemObj)
611 {
612 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
613 if (!pDevExt->MemBalloon.paMemObj)
614 {
615 LogRel(("vboxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
616 return VERR_NO_MEMORY;
617 }
618 }
619
620 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
621 if (RT_FAILURE(rc))
622 return rc;
623
624 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
625 {
626 /* inflate */
627 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
628 {
629 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
630 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
631 if (RT_FAILURE(rc))
632 {
633 if (rc == VERR_NOT_SUPPORTED)
634 {
635 /* not supported -- fall back to the R3-allocated memory. */
636 rc = VINF_SUCCESS;
637 pDevExt->MemBalloon.fUseKernelAPI = false;
638 Assert(pDevExt->MemBalloon.cChunks == 0);
639 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
640 }
641 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
642 * cannot allocate more memory => don't try further, just stop here */
643 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
644 break;
645 }
646
647 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
648 if (RT_FAILURE(rc))
649 {
650 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
651 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
652 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
653 break;
654 }
655 pDevExt->MemBalloon.cChunks++;
656 }
657 }
658 else
659 {
660 /* deflate */
661 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
662 {
663 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
664 if (RT_FAILURE(rc))
665 {
666 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
667 break;
668 }
669 pDevExt->MemBalloon.cChunks--;
670 }
671 }
672
673 VbglGRFree(&pReq->header);
674 }
675
676 /*
677 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
678 * the balloon changes via the other API.
679 */
680 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
681
682 return rc;
683}
684
685
686/**
687 * Helper to reinit the VBoxVMM communication after hibernation.
688 *
689 * @returns VBox status code.
690 * @param pDevExt The device extension.
691 * @param enmOSType The OS type.
692 */
693int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
694{
695 int rc = VBoxGuestReportGuestInfo(enmOSType);
696 if (RT_SUCCESS(rc))
697 {
698 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
699 if (RT_FAILURE(rc))
700 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
701 }
702 else
703 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
704 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
705 return rc;
706}
707
708
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the chunk tracking array on first use. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflate we also remember the first free slot here.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 chunk into memory before handing it to the host. */
        rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
825
826
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only tear down the balloon if this session owns it, or at driver unload. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                /* Deflate chunk by chunk; stop (and knowingly leak the rest)
                   on the first failure. */
                uint32_t i;
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
874
875
876/**
877 * Initializes the VBoxGuest device extension when the
878 * device driver is loaded.
879 *
880 * The native code locates the VMMDev on the PCI bus and retrieve
881 * the MMIO and I/O port ranges, this function will take care of
882 * mapping the MMIO memory (if present). Upon successful return
883 * the native code should set up the interrupt handler.
884 *
885 * @returns VBox status code.
886 *
887 * @param pDevExt The device extension. Allocated by the native code.
888 * @param IOPortBase The base of the I/O port range.
889 * @param pvMMIOBase The base of the MMIO memory mapping.
890 * This is optional, pass NULL if not present.
891 * @param cbMMIO The size of the MMIO memory mapping.
892 * This is optional, pass 0 if not present.
893 * @param enmOSType The guest OS type to report to the VMMDev.
894 * @param fFixedEvents Events that will be enabled upon init and no client
895 * will ever be allowed to mask.
896 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;
    unsigned i; /* NOTE(review): appears unused in this function - confirm before removing. */

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
                     "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
        RTLogRelSetDefaultInstance(pRelLogger);
    /** @todo Add native hook for getting logger config parameters and setting
     * them. On linux we should use the module parameter stuff... */
#endif

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    /* HGCM events must always reach the driver when HGCM support is compiled in. */
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    RTListInit(&pDevExt->SessionList);
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
    pDevExt->fVRDPEnabled = false;
#endif
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    pDevExt->MouseNotifyCallback.pfnNotify = NULL;
    pDevExt->MouseNotifyCallback.pvUser = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /* No capabilities reported yet; sessions will set these later. */
    pDevExt->u32AcquireModeGuestCaps = 0;
    pDevExt->u32SetModeGuestCaps = 0;
    pDevExt->u32GuestCaps = 0;

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* The event spinlock may have been created even though the session one failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        /* Unwind both spinlocks created above. */
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Preallocate the interrupt-acknowledge request so the ISR never has to allocate. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                /* Set the fixed event and disable the guest graphics capability
                 * by default. The guest specific graphics driver will re-enable
                 * the graphics capability if and when appropriate. */
                rc = vboxGuestUpdateHostFlags(pDevExt, NULL,
                                              HostFlags_FilterMask
                                              | HostFlags_Capabilities);
                if (RT_SUCCESS(rc))
                {
                    vboxGuestInitFixateGuestMappings(pDevExt);

                    /* A failure to report driver status is logged but not fatal. */
                    rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                    if (RT_FAILURE(rc))
                        LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                    Log(("VBoxGuestInitDevExt: returns success\n"));
                    return VINF_SUCCESS;
                }
                else
                    LogRelFunc(("failed to set host flags, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Common unwind for all failures after the lock creation succeeded. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
    return rc; /* (failed) */
}
1066
1067
1068/**
1069 * Deletes all the items in a wait chain.
1070 * @param pList The head of the chain.
1071 */
1072static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
1073{
1074 while (!RTListIsEmpty(pList))
1075 {
1076 int rc2;
1077 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1078 RTListNodeRemove(&pWait->ListNode);
1079
1080 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1081 pWait->Event = NIL_RTSEMEVENTMULTI;
1082 pWait->pSession = NULL;
1083 RTMemFree(pWait);
1084 }
1085}
1086
1087
1088/**
1089 * Destroys the VBoxGuest device extension.
1090 *
1091 * The native code should call this before the driver is loaded,
1092 * but don't call this on shutdown.
1093 *
1094 * @param pDevExt The device extension.
1095 */
1096void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
1097{
1098 int rc2;
1099 Log(("VBoxGuestDeleteDevExt:\n"));
1100 Log(("VBoxGuest: The additions driver is terminating.\n"));
1101
1102 /*
1103 * Clean up the bits that involves the host first.
1104 */
1105 vboxGuestTermUnfixGuestMappings(pDevExt);
1106 if (!RTListIsEmpty(&pDevExt->SessionList))
1107 {
1108 LogRelFunc(("session list not empty!\n"));
1109 RTListInit(&pDevExt->SessionList);
1110 }
1111 /* Update the host flags (mouse status etc) not to reflect this session. */
1112 pDevExt->fFixedEvents = 0;
1113 vboxGuestUpdateHostFlags(pDevExt, NULL, HostFlags_All);
1114 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
1115
1116 /*
1117 * Cleanup all the other resources.
1118 */
1119 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
1120 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
1121 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
1122
1123 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
1124#ifdef VBOX_WITH_HGCM
1125 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
1126#endif
1127#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1128 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
1129#endif
1130 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
1131 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
1132
1133 VbglTerminate();
1134
1135 pDevExt->pVMMDevMemory = NULL;
1136
1137 pDevExt->IOPortBase = 0;
1138 pDevExt->pIrqAckEvents = NULL;
1139
1140#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
1141 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
1142 RTLogDestroy(RTLogSetDefaultInstance(NULL));
1143#endif
1144
1145}
1146
1147
1148/**
1149 * Creates a VBoxGuest user session.
1150 *
1151 * The native code calls this when a ring-3 client opens the device.
1152 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
1153 *
1154 * @returns VBox status code.
1155 * @param pDevExt The device extension.
1156 * @param ppSession Where to store the session on success.
1157 */
1158int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1159{
1160 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1161 if (RT_UNLIKELY(!pSession))
1162 {
1163 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
1164 return VERR_NO_MEMORY;
1165 }
1166
1167 pSession->Process = RTProcSelf();
1168 pSession->R0Process = RTR0ProcHandleSelf();
1169 pSession->pDevExt = pDevExt;
1170 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1171 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1172 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1173
1174 *ppSession = pSession;
1175 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1176 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1177 return VINF_SUCCESS;
1178}
1179
1180
1181/**
1182 * Creates a VBoxGuest kernel session.
1183 *
1184 * The native code calls this when a ring-0 client connects to the device.
1185 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
1186 *
1187 * @returns VBox status code.
1188 * @param pDevExt The device extension.
1189 * @param ppSession Where to store the session on success.
1190 */
1191int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1192{
1193 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1194 if (RT_UNLIKELY(!pSession))
1195 {
1196 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
1197 return VERR_NO_MEMORY;
1198 }
1199
1200 pSession->Process = NIL_RTPROCESS;
1201 pSession->R0Process = NIL_RTR0PROCESS;
1202 pSession->pDevExt = pDevExt;
1203 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1204 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1205 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1206
1207 *ppSession = pSession;
1208 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1209 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1210 return VINF_SUCCESS;
1211}
1212
1213static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
1214
1215/**
1216 * Closes a VBoxGuest session.
1217 *
1218 * @param pDevExt The device extension.
1219 * @param pSession The session to close (and free).
1220 */
1221void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1222{
1223 unsigned i; NOREF(i);
1224 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1225 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1226
1227 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1228 RTListNodeRemove(&pSession->ListNode);
1229 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1230 VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE);
1231
1232 VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
1233
1234#ifdef VBOX_WITH_HGCM
1235 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1236 if (pSession->aHGCMClientIds[i])
1237 {
1238 VBoxGuestHGCMDisconnectInfo Info;
1239 Info.result = 0;
1240 Info.u32ClientID = pSession->aHGCMClientIds[i];
1241 pSession->aHGCMClientIds[i] = 0;
1242 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1243 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1244 }
1245#endif
1246
1247 pSession->pDevExt = NULL;
1248 pSession->Process = NIL_RTPROCESS;
1249 pSession->R0Process = NIL_RTR0PROCESS;
1250 vboxGuestCloseMemBalloon(pDevExt, pSession);
1251 RTMemFree(pSession);
1252 /* Update the host flags (mouse status etc) not to reflect this session. */
1253 vboxGuestUpdateHostFlags(pDevExt, NULL, HostFlags_All);
1254}
1255
1256
1257/**
1258 * Allocates a wait-for-event entry.
1259 *
1260 * @returns The wait-for-event entry.
1261 * @param pDevExt The device extension.
1262 * @param pSession The session that's allocating this. Can be NULL.
1263 */
1264static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1265{
1266 /*
1267 * Allocate it one way or the other.
1268 */
1269 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1270 if (pWait)
1271 {
1272 RTSpinlockAcquire(pDevExt->EventSpinlock);
1273
1274 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1275 if (pWait)
1276 RTListNodeRemove(&pWait->ListNode);
1277
1278 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1279 }
1280 if (!pWait)
1281 {
1282 static unsigned s_cErrors = 0;
1283 int rc;
1284
1285 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1286 if (!pWait)
1287 {
1288 if (s_cErrors++ < 32)
1289 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1290 return NULL;
1291 }
1292
1293 rc = RTSemEventMultiCreate(&pWait->Event);
1294 if (RT_FAILURE(rc))
1295 {
1296 if (s_cErrors++ < 32)
1297 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1298 RTMemFree(pWait);
1299 return NULL;
1300 }
1301
1302 pWait->ListNode.pNext = NULL;
1303 pWait->ListNode.pPrev = NULL;
1304 }
1305
1306 /*
1307 * Zero members just as an precaution.
1308 */
1309 pWait->fReqEvents = 0;
1310 pWait->fResEvents = 0;
1311#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1312 pWait->fPendingWakeUp = false;
1313 pWait->fFreeMe = false;
1314#endif
1315 pWait->pSession = pSession;
1316#ifdef VBOX_WITH_HGCM
1317 pWait->pHGCMReq = NULL;
1318#endif
1319 RTSemEventMultiReset(pWait->Event);
1320 return pWait;
1321}
1322
1323
1324/**
1325 * Frees the wait-for-event entry.
1326 *
1327 * The caller must own the wait spinlock !
1328 * The entry must be in a list!
1329 *
1330 * @param pDevExt The device extension.
1331 * @param pWait The wait-for-event entry to free.
1332 */
1333static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1334{
1335 pWait->fReqEvents = 0;
1336 pWait->fResEvents = 0;
1337#ifdef VBOX_WITH_HGCM
1338 pWait->pHGCMReq = NULL;
1339#endif
1340#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1341 Assert(!pWait->fFreeMe);
1342 if (pWait->fPendingWakeUp)
1343 pWait->fFreeMe = true;
1344 else
1345#endif
1346 {
1347 RTListNodeRemove(&pWait->ListNode);
1348 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1349 }
1350}
1351
1352
1353/**
1354 * Frees the wait-for-event entry.
1355 *
1356 * @param pDevExt The device extension.
1357 * @param pWait The wait-for-event entry to free.
1358 */
1359static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1360{
1361 RTSpinlockAcquire(pDevExt->EventSpinlock);
1362 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1363 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1364}
1365
1366
1367#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1368/**
1369 * Processes the wake-up list.
1370 *
1371 * All entries in the wake-up list gets signalled and moved to the woken-up
1372 * list.
1373 *
1374 * @param pDevExt The device extension.
1375 */
1376void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1377{
1378 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1379 {
1380 RTSpinlockAcquire(pDevExt->EventSpinlock);
1381 for (;;)
1382 {
1383 int rc;
1384 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1385 if (!pWait)
1386 break;
1387 pWait->fPendingWakeUp = true;
1388 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1389
1390 rc = RTSemEventMultiSignal(pWait->Event);
1391 AssertRC(rc);
1392
1393 RTSpinlockAcquire(pDevExt->EventSpinlock);
1394 pWait->fPendingWakeUp = false;
1395 if (!pWait->fFreeMe)
1396 {
1397 RTListNodeRemove(&pWait->ListNode);
1398 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1399 }
1400 else
1401 {
1402 pWait->fFreeMe = false;
1403 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1404 }
1405 }
1406 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1407 }
1408}
1409#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1410
1411
1412/**
1413 * Modifies the guest capabilities.
1414 *
1415 * Should be called during driver init and termination.
1416 *
1417 * @returns VBox status code.
1418 * @param fOr The Or mask (what to enable).
1419 * @param fNot The Not mask (what to disable).
1420 */
1421int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1422{
1423 VMMDevReqGuestCapabilities2 *pReq;
1424 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1425 if (RT_FAILURE(rc))
1426 {
1427 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1428 sizeof(*pReq), sizeof(*pReq), rc));
1429 return rc;
1430 }
1431
1432 pReq->u32OrMask = fOr;
1433 pReq->u32NotMask = fNot;
1434
1435 rc = VbglGRPerform(&pReq->header);
1436 if (RT_FAILURE(rc))
1437 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1438
1439 VbglGRFree(&pReq->header);
1440 return rc;
1441}
1442
1443
1444/**
1445 * Implements the fast (no input or output) type of IOCtls.
1446 *
1447 * This is currently just a placeholder stub inherited from the support driver code.
1448 *
1449 * @returns VBox status code.
1450 * @param iFunction The IOCtl function number.
1451 * @param pDevExt The device extension.
1452 * @param pSession The session.
1453 */
1454int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1455{
1456 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1457
1458 NOREF(iFunction);
1459 NOREF(pDevExt);
1460 NOREF(pSession);
1461 return VERR_NOT_SUPPORTED;
1462}
1463
1464
1465/**
1466 * Return the VMM device port.
1467 *
1468 * returns IPRT status code.
1469 * @param pDevExt The device extension.
1470 * @param pInfo The request info.
1471 * @param pcbDataReturned (out) contains the number of bytes to return.
1472 */
1473static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1474{
1475 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1476 pInfo->portAddress = pDevExt->IOPortBase;
1477 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1478 if (pcbDataReturned)
1479 *pcbDataReturned = sizeof(*pInfo);
1480 return VINF_SUCCESS;
1481}
1482
1483
1484#ifndef RT_OS_WINDOWS
1485/**
1486 * Set the callback for the kernel mouse handler.
1487 *
1488 * returns IPRT status code.
1489 * @param pDevExt The device extension.
1490 * @param pNotify The new callback information.
1491 */
1492int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1493{
1494 Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1495
1496 RTSpinlockAcquire(pDevExt->EventSpinlock);
1497 pDevExt->MouseNotifyCallback = *pNotify;
1498 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1499 return VINF_SUCCESS;
1500}
1501#endif
1502
1503
1504/**
1505 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1506 *
1507 * The caller enters the spinlock, we leave it.
1508 *
1509 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1510 */
1511DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
1512 int iEvent, const uint32_t fReqEvents)
1513{
1514 uint32_t fMatches = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
1515 if (fMatches || pSession->fPendingCancelWaitEvents)
1516 {
1517 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1518
1519 pInfo->u32EventFlagsOut = fMatches;
1520 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1521 if (fReqEvents & ~((uint32_t)1 << iEvent))
1522 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1523 else
1524 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1525 pSession->fPendingCancelWaitEvents = false;
1526 return VINF_SUCCESS;
1527 }
1528 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1529 return VERR_TIMEOUT;
1530}
1531
1532
/**
 * Handles the WAITEVENT I/O control, waiting for one or more of the events
 * in pInfo->u32EventMaskIn to occur.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           In: event mask and timeout (ms, UINT32_MAX means
 *                          wait indefinitely).  Out: matched event flags and
 *                          a VBOXGUEST_WAITEVENT_* result code.
 * @param   pcbDataReturned Where to store the number of bytes returned.
 *                          Optional, may be NULL.
 * @param   fInterruptible  Whether the wait may be interrupted (signal).
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll only: report a timeout right away. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     */
    /* UINT32_MAX in fResEvents is the cancel marker set by
       VBoxGuestCommonIOCtl_CancelAllWaitEvents. */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        /* The wait succeeded but no events were recorded - should not happen. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1655
1656
/**
 * Handles the CANCEL_ALL_WAITEVENTS I/O control, waking up every waiter
 * that belongs to the given session.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;
    /* Was as least one WAITEVENT in process for this session? If not we
     * set a flag that the next call should be interrupted immediately. This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool fCancelledOne = false;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            /* UINT32_MAX marks the wait as cancelled (checked by the waiter). */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Defer signalling until after the spinlock is released. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1701
1702/**
1703 * Checks if the VMM request is allowed in the context of the given session.
1704 *
1705 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1706 * @param pSession The calling session.
1707 * @param enmType The request type.
1708 * @param pReqHdr The request.
1709 */
1710static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1711 VMMDevRequestHeader const *pReqHdr)
1712{
1713 /*
1714 * Categorize the request being made.
1715 */
1716 /** @todo This need quite some more work! */
1717 enum
1718 {
1719 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1720 } enmRequired;
1721 switch (enmType)
1722 {
1723 /*
1724 * Deny access to anything we don't know or provide specialized I/O controls for.
1725 */
1726#ifdef VBOX_WITH_HGCM
1727 case VMMDevReq_HGCMConnect:
1728 case VMMDevReq_HGCMDisconnect:
1729# ifdef VBOX_WITH_64_BITS_GUESTS
1730 case VMMDevReq_HGCMCall32:
1731 case VMMDevReq_HGCMCall64:
1732# else
1733 case VMMDevReq_HGCMCall:
1734# endif /* VBOX_WITH_64_BITS_GUESTS */
1735 case VMMDevReq_HGCMCancel:
1736 case VMMDevReq_HGCMCancel2:
1737#endif /* VBOX_WITH_HGCM */
1738 default:
1739 enmRequired = kLevel_NoOne;
1740 break;
1741
1742 /*
1743 * There are a few things only this driver can do (and it doesn't use
1744 * the VMMRequst I/O control route anyway, but whatever).
1745 */
1746 case VMMDevReq_ReportGuestInfo:
1747 case VMMDevReq_ReportGuestInfo2:
1748 case VMMDevReq_GetHypervisorInfo:
1749 case VMMDevReq_SetHypervisorInfo:
1750 case VMMDevReq_RegisterPatchMemory:
1751 case VMMDevReq_DeregisterPatchMemory:
1752 case VMMDevReq_GetMemBalloonChangeRequest:
1753 enmRequired = kLevel_OnlyVBoxGuest;
1754 break;
1755
1756 /*
1757 * Trusted users apps only.
1758 */
1759 case VMMDevReq_QueryCredentials:
1760 case VMMDevReq_ReportCredentialsJudgement:
1761 case VMMDevReq_RegisterSharedModule:
1762 case VMMDevReq_UnregisterSharedModule:
1763 case VMMDevReq_WriteCoreDump:
1764 case VMMDevReq_GetCpuHotPlugRequest:
1765 case VMMDevReq_SetCpuHotPlugStatus:
1766 case VMMDevReq_CheckSharedModules:
1767 case VMMDevReq_GetPageSharingStatus:
1768 case VMMDevReq_DebugIsPageShared:
1769 case VMMDevReq_ReportGuestStats:
1770 case VMMDevReq_ReportGuestUserState:
1771 case VMMDevReq_GetStatisticsChangeRequest:
1772 case VMMDevReq_ChangeMemBalloon:
1773 enmRequired = kLevel_TrustedUsers;
1774 break;
1775
1776 /*
1777 * Anyone. But not for CapsAcquire mode
1778 */
1779 case VMMDevReq_SetGuestCapabilities:
1780 {
1781 VMMDevReqGuestCapabilities2 *pCaps = (VMMDevReqGuestCapabilities2*)pReqHdr;
1782 uint32_t fAcquireCaps = 0;
1783 if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, pCaps->u32OrMask, false, &fAcquireCaps))
1784 {
1785 AssertFailed();
1786 LogRel(("calling caps set for acquired caps %d\n", pCaps->u32OrMask));
1787 enmRequired = kLevel_NoOne;
1788 break;
1789 }
1790 /* hack to adjust the notcaps.
1791 * @todo: move to a better place
1792 * user-mode apps are allowed to pass any mask to the notmask,
1793 * the driver cleans up them accordingly */
1794 pCaps->u32NotMask &= ~fAcquireCaps;
1795 /* do not break, make it fall through to the below enmRequired setting */
1796 }
1797 /*
1798 * Anyone.
1799 */
1800 case VMMDevReq_GetMouseStatus:
1801 case VMMDevReq_SetMouseStatus:
1802 case VMMDevReq_SetPointerShape:
1803 case VMMDevReq_GetHostVersion:
1804 case VMMDevReq_Idle:
1805 case VMMDevReq_GetHostTime:
1806 case VMMDevReq_SetPowerStatus:
1807 case VMMDevReq_AcknowledgeEvents:
1808 case VMMDevReq_CtlGuestFilterMask:
1809 case VMMDevReq_ReportGuestStatus:
1810 case VMMDevReq_GetDisplayChangeRequest:
1811 case VMMDevReq_VideoModeSupported:
1812 case VMMDevReq_GetHeightReduction:
1813 case VMMDevReq_GetDisplayChangeRequest2:
1814 case VMMDevReq_VideoModeSupported2:
1815 case VMMDevReq_VideoAccelEnable:
1816 case VMMDevReq_VideoAccelFlush:
1817 case VMMDevReq_VideoSetVisibleRegion:
1818 case VMMDevReq_GetDisplayChangeRequestEx:
1819 case VMMDevReq_GetSeamlessChangeRequest:
1820 case VMMDevReq_GetVRDPChangeRequest:
1821 case VMMDevReq_LogString:
1822 case VMMDevReq_GetSessionId:
1823 enmRequired = kLevel_AllUsers;
1824 break;
1825
1826 /*
1827 * Depends on the request parameters...
1828 */
1829 /** @todo this have to be changed into an I/O control and the facilities
1830 * tracked in the session so they can automatically be failed when the
1831 * session terminates without reporting the new status.
1832 *
1833 * The information presented by IGuest is not reliable without this! */
1834 case VMMDevReq_ReportGuestCapabilities:
1835 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1836 {
1837 case VBoxGuestFacilityType_All:
1838 case VBoxGuestFacilityType_VBoxGuestDriver:
1839 enmRequired = kLevel_OnlyVBoxGuest;
1840 break;
1841 case VBoxGuestFacilityType_VBoxService:
1842 enmRequired = kLevel_TrustedUsers;
1843 break;
1844 case VBoxGuestFacilityType_VBoxTrayClient:
1845 case VBoxGuestFacilityType_Seamless:
1846 case VBoxGuestFacilityType_Graphics:
1847 default:
1848 enmRequired = kLevel_AllUsers;
1849 break;
1850 }
1851 break;
1852 }
1853
1854 /*
1855 * Check against the session.
1856 */
1857 switch (enmRequired)
1858 {
1859 default:
1860 case kLevel_NoOne:
1861 break;
1862 case kLevel_OnlyVBoxGuest:
1863 case kLevel_OnlyKernel:
1864 if (pSession->R0Process == NIL_RTR0PROCESS)
1865 return VINF_SUCCESS;
1866 break;
1867 case kLevel_TrustedUsers:
1868 case kLevel_AllUsers:
1869 return VINF_SUCCESS;
1870 }
1871
1872 return VERR_PERMISSION_DENIED;
1873}
1874
/**
 * Handles the VMMREQUEST I/O control, performing a generic VMMDev request
 * on behalf of the caller.
 *
 * Validates the request header, checks permissions, copies the request into
 * the physical memory heap, performs it, and copies the result back.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request header (input and output buffer).
 * @param   cbData          Size of the buffer pReqHdr points at.
 * @param   pcbDataReturned Where to store the number of bytes returned.
 *                          Optional, may be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        LogRel(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        LogRel(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce per-session permissions for this request type. */
    rc = VBoxGuestCheckIfVMMReqAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the result (including any output members) back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the request itself failed; propagate its status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1958
1959
1960static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt,
1961 PVBOXGUESTSESSION pSession,
1962 VBoxGuestFilterMaskInfo *pInfo)
1963{
1964 int rc;
1965
1966 if ((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
1967 return VERR_INVALID_PARAMETER;
1968 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1969 pSession->fFilterMask |= pInfo->u32OrMask;
1970 pSession->fFilterMask &= ~pInfo->u32NotMask;
1971 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1972 rc = vboxGuestUpdateHostFlags(pDevExt, pSession, HostFlags_FilterMask);
1973 return rc;
1974}
1975
1976
1977static int VBoxGuestCommonIOCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt,
1978 PVBOXGUESTSESSION pSession,
1979 VBoxGuestSetCapabilitiesInfo *pInfo)
1980{
1981 int rc;
1982
1983 if ( (pInfo->u32OrMask | pInfo->u32NotMask)
1984 & ~VMMDEV_GUEST_CAPABILITIES_MASK)
1985 return VERR_INVALID_PARAMETER;
1986 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1987 pSession->fCapabilities |= pInfo->u32OrMask;
1988 pSession->fCapabilities &= ~pInfo->u32NotMask;
1989 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1990 rc = vboxGuestUpdateHostFlags(pDevExt, pSession, HostFlags_Capabilities);
1991 return rc;
1992}
1993
1994
1995/**
1996 * Sets the mouse status features for this session and updates them
1997 * globally.
1998 *
1999 * @returns VBox status code.
2000 *
2001 * @param pDevExt The device extention.
2002 * @param pSession The session.
2003 * @param fFeatures New bitmap of enabled features.
2004 */
2005static int vboxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt,
2006 PVBOXGUESTSESSION pSession,
2007 uint32_t fFeatures)
2008{
2009 int rc;
2010
2011 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
2012 return VERR_INVALID_PARAMETER;
2013 /* Since this is more of a negative feature we invert it to get the real
2014 * feature (when the guest does not need the host cursor). */
2015 fFeatures ^= VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2016 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2017 pSession->fMouseStatus = fFeatures;
2018 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2019 rc = vboxGuestUpdateHostFlags(pDevExt, pSession, HostFlags_MouseStatus);
2020 return rc;
2021}
2022
2023#ifdef VBOX_WITH_HGCM
2024
2025AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2026
2027/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
2028static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
2029 bool fInterruptible, uint32_t cMillies)
2030{
2031 int rc;
2032
2033 /*
2034 * Check to see if the condition was met by the time we got here.
2035 *
2036 * We create a simple poll loop here for dealing with out-of-memory
2037 * conditions since the caller isn't necessarily able to deal with
2038 * us returning too early.
2039 */
2040 PVBOXGUESTWAIT pWait;
2041 for (;;)
2042 {
2043 RTSpinlockAcquire(pDevExt->EventSpinlock);
2044 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2045 {
2046 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2047 return VINF_SUCCESS;
2048 }
2049 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2050
2051 pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
2052 if (pWait)
2053 break;
2054 if (fInterruptible)
2055 return VERR_INTERRUPTED;
2056 RTThreadSleep(1);
2057 }
2058 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
2059 pWait->pHGCMReq = pHdr;
2060
2061 /*
2062 * Re-enter the spinlock and re-check for the condition.
2063 * If the condition is met, return.
2064 * Otherwise link us into the HGCM wait list and go to sleep.
2065 */
2066 RTSpinlockAcquire(pDevExt->EventSpinlock);
2067 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
2068 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
2069 {
2070 VBoxGuestWaitFreeLocked(pDevExt, pWait);
2071 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2072 return VINF_SUCCESS;
2073 }
2074 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2075
2076 if (fInterruptible)
2077 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
2078 else
2079 rc = RTSemEventMultiWait(pWait->Event, cMillies);
2080 if (rc == VERR_SEM_DESTROYED)
2081 return rc;
2082
2083 /*
2084 * Unlink, free and return.
2085 */
2086 if ( RT_FAILURE(rc)
2087 && rc != VERR_TIMEOUT
2088 && ( !fInterruptible
2089 || rc != VERR_INTERRUPTED))
2090 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
2091
2092 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
2093 return rc;
2094}
2095
2096
2097/**
2098 * This is a callback for dealing with async waits.
2099 *
2100 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
2101 */
2102static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2103{
2104 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2105 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
2106 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
2107 pDevExt,
2108 false /* fInterruptible */,
2109 u32User /* cMillies */);
2110}
2111
2112
2113/**
2114 * This is a callback for dealing with async waits with a timeout.
2115 *
2116 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
2117 */
2118static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
2119 void *pvUser, uint32_t u32User)
2120{
2121 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2122 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
2123 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
2124 pDevExt,
2125 true /* fInterruptible */,
2126 u32User /* cMillies */ );
2127
2128}
2129
2130
2131static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2132 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
2133{
2134 int rc;
2135
2136 /*
2137 * The VbglHGCMConnect call will invoke the callback if the HGCM
2138 * call is performed in an ASYNC fashion. The function is not able
2139 * to deal with cancelled requests.
2140 */
2141 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
2142 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
2143 ? pInfo->Loc.u.host.achName : "<not local host>"));
2144
2145 rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2146 if (RT_SUCCESS(rc))
2147 {
2148 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
2149 pInfo->u32ClientID, pInfo->result, rc));
2150 if (RT_SUCCESS(pInfo->result))
2151 {
2152 /*
2153 * Append the client id to the client id table.
2154 * If the table has somehow become filled up, we'll disconnect the session.
2155 */
2156 unsigned i;
2157 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2158 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2159 if (!pSession->aHGCMClientIds[i])
2160 {
2161 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
2162 break;
2163 }
2164 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2165 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2166 {
2167 static unsigned s_cErrors = 0;
2168 VBoxGuestHGCMDisconnectInfo Info;
2169
2170 if (s_cErrors++ < 32)
2171 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
2172
2173 Info.result = 0;
2174 Info.u32ClientID = pInfo->u32ClientID;
2175 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2176 return VERR_TOO_MANY_OPEN_FILES;
2177 }
2178 }
2179 if (pcbDataReturned)
2180 *pcbDataReturned = sizeof(*pInfo);
2181 }
2182 return rc;
2183}
2184
2185
2186static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
2187 size_t *pcbDataReturned)
2188{
2189 /*
2190 * Validate the client id and invalidate its entry while we're in the call.
2191 */
2192 int rc;
2193 const uint32_t u32ClientId = pInfo->u32ClientID;
2194 unsigned i;
2195 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2196 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2197 if (pSession->aHGCMClientIds[i] == u32ClientId)
2198 {
2199 pSession->aHGCMClientIds[i] = UINT32_MAX;
2200 break;
2201 }
2202 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2203 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2204 {
2205 static unsigned s_cErrors = 0;
2206 if (s_cErrors++ > 32)
2207 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
2208 return VERR_INVALID_HANDLE;
2209 }
2210
2211 /*
2212 * The VbglHGCMConnect call will invoke the callback if the HGCM
2213 * call is performed in an ASYNC fashion. The function is not able
2214 * to deal with cancelled requests.
2215 */
2216 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
2217 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2218 if (RT_SUCCESS(rc))
2219 {
2220 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
2221 if (pcbDataReturned)
2222 *pcbDataReturned = sizeof(*pInfo);
2223 }
2224
2225 /* Update the client id array according to the result. */
2226 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2227 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2228 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
2229 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2230
2231 return rc;
2232}
2233
2234
2235static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
2236 PVBOXGUESTSESSION pSession,
2237 VBoxGuestHGCMCallInfo *pInfo,
2238 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2239 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2240{
2241 const uint32_t u32ClientId = pInfo->u32ClientID;
2242 uint32_t fFlags;
2243 size_t cbActual;
2244 unsigned i;
2245 int rc;
2246
2247 /*
2248 * Some more validations.
2249 */
2250 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2251 {
2252 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
2253 return VERR_INVALID_PARAMETER;
2254 }
2255
2256 cbActual = cbExtra + sizeof(*pInfo);
2257#ifdef RT_ARCH_AMD64
2258 if (f32bit)
2259 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2260 else
2261#endif
2262 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2263 if (cbData < cbActual)
2264 {
2265 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2266 cbData, cbData, cbActual, cbActual));
2267 return VERR_INVALID_PARAMETER;
2268 }
2269
2270 /*
2271 * Validate the client id.
2272 */
2273 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2274 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2275 if (pSession->aHGCMClientIds[i] == u32ClientId)
2276 break;
2277 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2278 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2279 {
2280 static unsigned s_cErrors = 0;
2281 if (s_cErrors++ > 32)
2282 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2283 return VERR_INVALID_HANDLE;
2284 }
2285
2286 /*
2287 * The VbglHGCMCall call will invoke the callback if the HGCM
2288 * call is performed in an ASYNC fashion. This function can
2289 * deal with cancelled requests, so we let user more requests
2290 * be interruptible (should add a flag for this later I guess).
2291 */
2292 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2293 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2294#ifdef RT_ARCH_AMD64
2295 if (f32bit)
2296 {
2297 if (fInterruptible)
2298 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2299 else
2300 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2301 }
2302 else
2303#endif
2304 {
2305 if (fInterruptible)
2306 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2307 else
2308 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2309 }
2310 if (RT_SUCCESS(rc))
2311 {
2312 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2313 if (pcbDataReturned)
2314 *pcbDataReturned = cbActual;
2315 }
2316 else
2317 {
2318 if ( rc != VERR_INTERRUPTED
2319 && rc != VERR_TIMEOUT)
2320 {
2321 static unsigned s_cErrors = 0;
2322 if (s_cErrors++ < 32)
2323 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2324 }
2325 else
2326 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2327 }
2328 return rc;
2329}
2330
2331
2332#endif /* VBOX_WITH_HGCM */
2333
2334/**
2335 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2336 *
2337 * Ask the host for the size of the balloon and try to set it accordingly. If
2338 * this approach fails because it's not supported, return with fHandleInR3 set
2339 * and let the user land supply memory we can lock via the other ioctl.
2340 *
2341 * @returns VBox status code.
2342 *
2343 * @param pDevExt The device extension.
2344 * @param pSession The session.
2345 * @param pInfo The output buffer.
2346 * @param pcbDataReturned Where to store the amount of returned data. Can
2347 * be NULL.
2348 */
2349static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2350 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2351{
2352 VMMDevGetMemBalloonChangeRequest *pReq;
2353 int rc;
2354
2355 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2356 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2357 AssertRCReturn(rc, rc);
2358
2359 /*
2360 * The first user trying to query/change the balloon becomes the
2361 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2362 */
2363 if ( pDevExt->MemBalloon.pOwner != pSession
2364 && pDevExt->MemBalloon.pOwner == NULL)
2365 pDevExt->MemBalloon.pOwner = pSession;
2366
2367 if (pDevExt->MemBalloon.pOwner == pSession)
2368 {
2369 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2370 if (RT_SUCCESS(rc))
2371 {
2372 /*
2373 * This is a response to that event. Setting this bit means that
2374 * we request the value from the host and change the guest memory
2375 * balloon according to this value.
2376 */
2377 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2378 rc = VbglGRPerform(&pReq->header);
2379 if (RT_SUCCESS(rc))
2380 {
2381 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2382 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2383
2384 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2385 pInfo->fHandleInR3 = false;
2386
2387 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2388 /* Ignore various out of memory failures. */
2389 if ( rc == VERR_NO_MEMORY
2390 || rc == VERR_NO_PHYS_MEMORY
2391 || rc == VERR_NO_CONT_MEMORY)
2392 rc = VINF_SUCCESS;
2393
2394 if (pcbDataReturned)
2395 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2396 }
2397 else
2398 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2399 VbglGRFree(&pReq->header);
2400 }
2401 }
2402 else
2403 rc = VERR_PERMISSION_DENIED;
2404
2405 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2406 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2407 return rc;
2408}
2409
2410
2411/**
2412 * Handle a request for changing the memory balloon.
2413 *
2414 * @returns VBox status code.
2415 *
2416 * @param pDevExt The device extention.
2417 * @param pSession The session.
2418 * @param pInfo The change request structure (input).
2419 * @param pcbDataReturned Where to store the amount of returned data. Can
2420 * be NULL.
2421 */
2422static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2423 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2424{
2425 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2426 AssertRCReturn(rc, rc);
2427
2428 if (!pDevExt->MemBalloon.fUseKernelAPI)
2429 {
2430 /*
2431 * The first user trying to query/change the balloon becomes the
2432 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2433 */
2434 if ( pDevExt->MemBalloon.pOwner != pSession
2435 && pDevExt->MemBalloon.pOwner == NULL)
2436 pDevExt->MemBalloon.pOwner = pSession;
2437
2438 if (pDevExt->MemBalloon.pOwner == pSession)
2439 {
2440 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2441 if (pcbDataReturned)
2442 *pcbDataReturned = 0;
2443 }
2444 else
2445 rc = VERR_PERMISSION_DENIED;
2446 }
2447 else
2448 rc = VERR_PERMISSION_DENIED;
2449
2450 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2451 return rc;
2452}
2453
2454
2455/**
2456 * Handle a request for writing a core dump of the guest on the host.
2457 *
2458 * @returns VBox status code.
2459 *
2460 * @param pDevExt The device extension.
2461 * @param pInfo The output buffer.
2462 */
2463static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2464{
2465 VMMDevReqWriteCoreDump *pReq = NULL;
2466 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2467 if (RT_FAILURE(rc))
2468 {
2469 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2470 sizeof(*pReq), sizeof(*pReq), rc));
2471 return rc;
2472 }
2473
2474 pReq->fFlags = pInfo->fFlags;
2475 rc = VbglGRPerform(&pReq->header);
2476 if (RT_FAILURE(rc))
2477 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2478
2479 VbglGRFree(&pReq->header);
2480 return rc;
2481}
2482
2483
2484#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2485/**
2486 * Enables the VRDP session and saves its session ID.
2487 *
2488 * @returns VBox status code.
2489 *
2490 * @param pDevExt The device extention.
2491 * @param pSession The session.
2492 */
2493static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2494{
2495 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2496 return VERR_NOT_IMPLEMENTED;
2497}
2498
2499
2500/**
2501 * Disables the VRDP session.
2502 *
2503 * @returns VBox status code.
2504 *
2505 * @param pDevExt The device extention.
2506 * @param pSession The session.
2507 */
2508static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2509{
2510 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2511 return VERR_NOT_IMPLEMENTED;
2512}
2513#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2514
2515
2516/**
2517 * Guest backdoor logging.
2518 *
2519 * @returns VBox status code.
2520 *
2521 * @param pDevExt The device extension.
2522 * @param pch The log message (need not be NULL terminated).
2523 * @param cbData Size of the buffer.
2524 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2525 */
2526static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2527{
2528 NOREF(pch);
2529 NOREF(cbData);
2530 if (pDevExt->fLoggingEnabled)
2531 RTLogBackdoorPrintf("%.*s", cbData, pch);
2532 else
2533 Log(("%.*s", cbData, pch));
2534 if (pcbDataReturned)
2535 *pcbDataReturned = 0;
2536 return VINF_SUCCESS;
2537}
2538
2539static bool VBoxGuestCommonGuestCapsValidateValues(uint32_t fCaps)
2540{
2541 if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
2542 return false;
2543
2544 return true;
2545}
2546
2547/** Check whether any unreported VMM device events should be reported to any of
2548 * the currently listening sessions. In addition, report any events in
2549 * @a fGenFakeEvents.
2550 * @note This is called by GUEST_CAPS_ACQUIRE in case any pending events can now
2551 * be dispatched to the session which acquired capabilities. The fake
2552 * events are a hack to wake up threads in that session which would not
2553 * otherwise be woken.
2554 * @todo Why not just use CANCEL_ALL_WAITEVENTS to do the waking up rather than
2555 * adding additional code to the driver?
2556 * @todo Why does acquiring capabilities block and unblock events? Capabilities
2557 * are supposed to control what is reported to the host, we already have
2558 * separate requests for blocking and unblocking events. */
2559static void VBoxGuestCommonCheckEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fGenFakeEvents)
2560{
2561 RTSpinlockAcquire(pDevExt->EventSpinlock);
2562 uint32_t fEvents = fGenFakeEvents | pDevExt->f32PendingEvents;
2563 PVBOXGUESTWAIT pWait;
2564 PVBOXGUESTWAIT pSafe;
2565
2566 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2567 {
2568 uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
2569 if ( (pWait->fReqEvents & fEvents & fHandledEvents)
2570 && !pWait->fResEvents)
2571 {
2572 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
2573 Assert(!(fGenFakeEvents & pWait->fResEvents) || pSession == pWait->pSession);
2574 fEvents &= ~pWait->fResEvents;
2575 RTListNodeRemove(&pWait->ListNode);
2576#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2577 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2578#else
2579 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2580 int rc = RTSemEventMultiSignal(pWait->Event);
2581 AssertRC(rc);
2582#endif
2583 if (!fEvents)
2584 break;
2585 }
2586 }
2587 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2588
2589 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2590
2591#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2592 VBoxGuestWaitDoWakeUps(pDevExt);
2593#endif
2594}
2595
2596/** Switch the capabilities in @a fOrMask to "acquire" mode if they are not
2597 * already in "set" mode. If @a enmFlags is not set to
2598 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE, also try to acquire those
2599 * capabilities for the current session and release those in @a fNotFlag. */
2600static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags)
2601{
2602 uint32_t fSetCaps = 0;
2603
2604 if (!VBoxGuestCommonGuestCapsValidateValues(fOrMask))
2605 {
2606 LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid fOrMask\n",
2607 pSession, fOrMask, fNotMask, enmFlags));
2608 return VERR_INVALID_PARAMETER;
2609 }
2610
2611 if ( enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
2612 && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
2613 {
2614 LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid enmFlags %d\n",
2615 pSession, fOrMask, fNotMask, enmFlags));
2616 return VERR_INVALID_PARAMETER;
2617 }
2618
2619 if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, fOrMask, true, &fSetCaps))
2620 {
2621 LogRel(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- calling caps acquire for set caps\n",
2622 pSession, fOrMask, fNotMask, enmFlags));
2623 return VERR_INVALID_STATE;
2624 }
2625
2626 if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
2627 {
2628 Log(("VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- configured acquire caps: 0x%x\n",
2629 pSession, fOrMask, fNotMask, enmFlags));
2630 return VINF_SUCCESS;
2631 }
2632
2633 /* the fNotMask no need to have all values valid,
2634 * invalid ones will simply be ignored */
2635 uint32_t fCurrentOwnedCaps;
2636 uint32_t fSessionNotCaps;
2637 uint32_t fSessionOrCaps;
2638 uint32_t fOtherConflictingCaps;
2639
2640 fNotMask &= ~fOrMask;
2641
2642 RTSpinlockAcquire(pDevExt->EventSpinlock);
2643
2644 fCurrentOwnedCaps = pSession->u32AquiredGuestCaps;
2645 fSessionNotCaps = fCurrentOwnedCaps & fNotMask;
2646 fSessionOrCaps = fOrMask & ~fCurrentOwnedCaps;
2647 fOtherConflictingCaps = pDevExt->u32GuestCaps & ~fCurrentOwnedCaps;
2648 fOtherConflictingCaps &= fSessionOrCaps;
2649
2650 if (!fOtherConflictingCaps)
2651 {
2652 if (fSessionOrCaps)
2653 {
2654 pSession->u32AquiredGuestCaps |= fSessionOrCaps;
2655 pDevExt->u32GuestCaps |= fSessionOrCaps;
2656 }
2657
2658 if (fSessionNotCaps)
2659 {
2660 pSession->u32AquiredGuestCaps &= ~fSessionNotCaps;
2661 pDevExt->u32GuestCaps &= ~fSessionNotCaps;
2662 }
2663 }
2664
2665 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2666
2667 if (fOtherConflictingCaps)
2668 {
2669 Log(("VBoxGuest: Caps 0x%x were busy\n", fOtherConflictingCaps));
2670 return VERR_RESOURCE_BUSY;
2671 }
2672
2673 /* now do host notification outside the lock */
2674 if (!fSessionOrCaps && !fSessionNotCaps)
2675 {
2676 /* no changes, return */
2677 return VINF_SUCCESS;
2678 }
2679
2680 int rc = VBoxGuestSetGuestCapabilities(fSessionOrCaps, fSessionNotCaps);
2681 if (RT_FAILURE(rc))
2682 {
2683 LogRel(("VBoxGuestCommonGuestCapsAcquire: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
2684
2685 /* Failure branch
2686 * this is generally bad since e.g. failure to release the caps may result in other sessions not being able to use it
2687 * so we are not trying to restore the caps back to their values before the VBoxGuestCommonGuestCapsAcquire call,
2688 * but just pretend everithing is OK.
2689 * @todo: better failure handling mechanism? */
2690 }
2691
2692 /* success! */
2693 uint32_t fGenFakeEvents = 0;
2694
2695 if (fSessionOrCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
2696 {
2697 /* generate the seamless change event so that the r3 app could synch with the seamless state
2698 * although this introduces a false alarming of r3 client, it still solve the problem of
2699 * client state inconsistency in multiuser environment */
2700 fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
2701 }
2702
2703 /* since the acquire filter mask has changed, we need to process events in any way to ensure they go from pending events field
2704 * to the proper (un-filtered) entries */
2705 VBoxGuestCommonCheckEvents(pDevExt, pSession, fGenFakeEvents);
2706
2707 return VINF_SUCCESS;
2708}
2709
2710static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
2711{
2712 int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags);
2713 if (RT_FAILURE(rc))
2714 LogRel(("VBoxGuestCommonGuestCapsAcquire: failed rc=%Rrc\n", rc));
2715 pAcquire->rc = rc;
2716 return VINF_SUCCESS;
2717}
2718
2719
2720/**
2721 * Common IOCtl for user to kernel and kernel to kernel communication.
2722 *
2723 * This function only does the basic validation and then invokes
2724 * worker functions that takes care of each specific function.
2725 *
2726 * @returns VBox status code.
2727 *
2728 * @param iFunction The requested function.
2729 * @param pDevExt The device extension.
2730 * @param pSession The client session.
2731 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2732 * @param cbData The max size of the data buffer.
2733 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2734 */
2735int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2736 void *pvData, size_t cbData, size_t *pcbDataReturned)
2737{
2738 int rc;
2739 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2740 iFunction, pDevExt, pSession, pvData, cbData));
2741
2742 /*
2743 * Make sure the returned data size is set to zero.
2744 */
2745 if (pcbDataReturned)
2746 *pcbDataReturned = 0;
2747
2748 /*
2749 * Define some helper macros to simplify validation.
2750 */
2751#define CHECKRET_RING0(mnemonic) \
2752 do { \
2753 if (pSession->R0Process != NIL_RTR0PROCESS) \
2754 { \
2755 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2756 pSession->Process, (uintptr_t)pSession->R0Process)); \
2757 return VERR_PERMISSION_DENIED; \
2758 } \
2759 } while (0)
2760#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2761 do { \
2762 if (cbData < (cbMin)) \
2763 { \
2764 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2765 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2766 return VERR_BUFFER_OVERFLOW; \
2767 } \
2768 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2769 { \
2770 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2771 return VERR_INVALID_POINTER; \
2772 } \
2773 } while (0)
2774#define CHECKRET_SIZE(mnemonic, cb) \
2775 do { \
2776 if (cbData != (cb)) \
2777 { \
2778 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2779 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2780 return VERR_BUFFER_OVERFLOW; \
2781 } \
2782 if ((cb) != 0 && !VALID_PTR(pvData)) \
2783 { \
2784 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2785 return VERR_INVALID_POINTER; \
2786 } \
2787 } while (0)
2788
2789
2790 /*
2791 * Deal with variably sized requests first.
2792 */
2793 rc = VINF_SUCCESS;
2794 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2795 {
2796 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2797 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2798 }
2799#ifdef VBOX_WITH_HGCM
2800 /*
2801 * These ones are a bit tricky.
2802 */
2803 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2804 {
2805 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2806 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2807 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2808 fInterruptible, false /*f32bit*/, false /* fUserData */,
2809 0, cbData, pcbDataReturned);
2810 }
2811 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2812 {
2813 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2814 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2815 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2816 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2817 false /*f32bit*/, false /* fUserData */,
2818 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2819 }
2820 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2821 {
2822 bool fInterruptible = true;
2823 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2824 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2825 fInterruptible, false /*f32bit*/, true /* fUserData */,
2826 0, cbData, pcbDataReturned);
2827 }
2828# ifdef RT_ARCH_AMD64
2829 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2830 {
2831 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2832 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2833 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2834 fInterruptible, true /*f32bit*/, false /* fUserData */,
2835 0, cbData, pcbDataReturned);
2836 }
2837 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2838 {
2839 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2840 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2841 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2842 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2843 true /*f32bit*/, false /* fUserData */,
2844 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2845 }
2846# endif
2847#endif /* VBOX_WITH_HGCM */
2848 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2849 {
2850 CHECKRET_MIN_SIZE("LOG", 1);
2851 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2852 }
2853 else
2854 {
2855 switch (iFunction)
2856 {
2857 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2858 CHECKRET_RING0("GETVMMDEVPORT");
2859 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2860 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2861 break;
2862
2863#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2864 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2865 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2866 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2867 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2868 break;
2869#endif
2870
2871 case VBOXGUEST_IOCTL_WAITEVENT:
2872 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2873 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2874 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2875 break;
2876
2877 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2878 if (cbData != 0)
2879 rc = VERR_INVALID_PARAMETER;
2880 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2881 break;
2882
2883 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2884 CHECKRET_MIN_SIZE("CTL_FILTER_MASK",
2885 sizeof(VBoxGuestFilterMaskInfo));
2886 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, pSession,
2887 (VBoxGuestFilterMaskInfo *)pvData);
2888 break;
2889
2890#ifdef VBOX_WITH_HGCM
2891 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2892# ifdef RT_ARCH_AMD64
2893 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2894# endif
2895 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2896 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2897 break;
2898
2899 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2900# ifdef RT_ARCH_AMD64
2901 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2902# endif
2903 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2904 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2905 break;
2906#endif /* VBOX_WITH_HGCM */
2907
2908 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2909 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2910 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2911 break;
2912
2913 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2914 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2915 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2916 break;
2917
2918 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2919 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2920 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2921 break;
2922
2923#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2924 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2925 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2926 break;
2927
2928 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2929 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2930 break;
2931#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2932 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2933 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2934 rc = vboxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2935 *(uint32_t *)pvData);
2936 break;
2937
2938#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
2939 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
2940 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
2941 rc = VbgdNtIOCtl_DpcLatencyChecker();
2942 break;
2943#endif
2944
2945 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
2946 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
2947 rc = VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire*)pvData);
2948 *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
2949 break;
2950
2951 case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
2952 CHECKRET_MIN_SIZE("SET_GUEST_CAPABILITIES",
2953 sizeof(VBoxGuestSetCapabilitiesInfo));
2954 rc = VBoxGuestCommonIOCtl_SetCapabilities(pDevExt, pSession,
2955 (VBoxGuestSetCapabilitiesInfo *)pvData);
2956 break;
2957
2958 default:
2959 {
2960 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x stripped size=%#x\n",
2961 iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2962 rc = VERR_NOT_SUPPORTED;
2963 break;
2964 }
2965 }
2966 }
2967
2968 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2969 return rc;
2970}
2971
2972
2973
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 *
 * @remarks Runs in interrupt context: only the event spinlock is taken, and on
 *          OSes where semaphores cannot be signalled at this level the actual
 *          waiter wake-ups are deferred (see VBOXGUEST_USE_DEFERRED_WAKE_UP
 *          and the #if at the end of the function).
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents; /* Preallocated ack request shared with the host. */
    int rc = 0;                                           /* Accumulates semaphore-signal status; asserted 0 below. */
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     * (No ack request allocated yet means init hasn't completed; the IRQ
     * cannot be ours in that case.)
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     * The host sets fHaveEvents in the shared VMMDev memory when it raises
     * an interrupt for us (possibly shared with other devices).
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         * Instead the physical address of the preinitialized ack request is
         * written straight to the VMMDev request port; the host fills in
         * pReq->events and pReq->header.rc.  The compiler barriers keep the
         * field initialization and the result reads ordered around the
         * port write.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;
            PVBOXGUESTWAIT pSafe;

            Log3(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it for the poll-queue work after the spinlock is
             * released, and invoke the optional native notify callback
             * (Windows handles mouse notification elsewhere).
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#ifndef RT_OS_WINDOWS
                if (pDevExt->MouseNotifyCallback.pfnNotify)
                    pDevExt->MouseNotifyCallback.pfnNotify
                        (pDevExt->MouseNotifyCallback.pvUser);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host has marked DONE is moved to
             * the wake-up (or woken-up) list; the event bit is then consumed
             * so the FIFO evaluation below doesn't see it.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        /* Signalling is unsafe at this level; defer to VBoxGuestWaitDoWakeUps. */
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             * Fold in previously undelivered events; hand each waiter the
             * subset it asked for (restricted to events its session is
             * allowed to see), first come first served.  Whatever remains
             * unconsumed is stored back into f32PendingEvents.
             */
            fEvents |= pDevExt->f32PendingEvents;
            RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
            {
                uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
                if (   (pWait->fReqEvents & fEvents & fHandledEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                    fEvents &= ~pWait->fResEvents;
                    RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                    RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                    rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                    if (!fEvents) /* All delivered; no point scanning further. */
                        break;
                }
            }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     *       care of it.  Same on darwin, doing it in the work loop callback.
     */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    /* rc only collects RTSemEventMultiSignal status (non-deferred builds);
       any failure there would indicate a corrupted waiter, hence the assert. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
3114
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette