VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 53402

Last change on this file since 53402 was 53293, checked in by vboxsync, 10 years ago

VBoxGuest: warnings

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 114.2 KB
Line 
1/* $Id: VBoxGuest.cpp 53293 2014-11-10 15:37:37Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2014 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#include <iprt/timer.h>
46#ifdef VBOX_WITH_HGCM
47# include <iprt/thread.h>
48#endif
49#include "version-generated.h"
50#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
51# include "revision-generated.h"
52#endif
53#ifdef RT_OS_WINDOWS
54# ifndef CTL_CODE
55# include <Windows.h>
56# endif
57#endif
58#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
59# include <iprt/rand.h>
60#endif
61
62
63/*******************************************************************************
64* Internal Functions *
65*******************************************************************************/
66#ifdef VBOX_WITH_HGCM
67static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
68#endif
69
70static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags);
71
72#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
73
74/** Return the mask of VMM device events that this session is allowed to see,
75 * ergo, all events except those in "acquire" mode which have not been acquired
76 * by this session. */
77DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
78{
79 if (!pDevExt->u32AcquireModeGuestCaps)
80 return VMMDEV_EVENT_VALID_EVENT_MASK;
81
82 /** @note VMMDEV_EVENT_VALID_EVENT_MASK should actually be the mask of valid
83 * capabilities, but that doesn't affect this code. */
84 uint32_t u32AllowedGuestCaps = pSession->u32AquiredGuestCaps | (VMMDEV_EVENT_VALID_EVENT_MASK & ~pDevExt->u32AcquireModeGuestCaps);
85 uint32_t u32CleanupEvents = VBOXGUEST_ACQUIRE_STYLE_EVENTS;
86 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
87 u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
88 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
89 u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
90
91 return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
92}
93
94DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
95{
96 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
97 if (fMatches)
98 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
99 return fMatches;
100}
101
102/** Puts a capability in "acquire" or "set" mode and returns the mask of
103 * capabilities currently in the other mode. Once a capability has been put in
104 * one of the two modes it can no longer be removed from that mode. */
105DECLINLINE(bool) VBoxGuestCommonGuestCapsModeSet(PVBOXGUESTDEVEXT pDevExt, uint32_t fCaps, bool fAcquire, uint32_t *pu32OtherVal)
106{
107 uint32_t *pVal = fAcquire ? &pDevExt->u32AcquireModeGuestCaps : &pDevExt->u32SetModeGuestCaps;
108 const uint32_t fNotVal = !fAcquire ? pDevExt->u32AcquireModeGuestCaps : pDevExt->u32SetModeGuestCaps;
109 bool fResult = true;
110 RTSpinlockAcquire(pDevExt->EventSpinlock);
111
112 if (!(fNotVal & fCaps))
113 *pVal |= fCaps;
114 else
115 {
116 AssertMsgFailed(("trying to change caps mode\n"));
117 fResult = false;
118 }
119
120 RTSpinlockRelease(pDevExt->EventSpinlock);
121
122 if (pu32OtherVal)
123 *pu32OtherVal = fNotVal;
124 return fResult;
125}
126
127
/**
 * Sets the interrupt filter mask during initialization and termination.
 *
 * This will ASSUME that we're the ones in charge over the mask, so
 * we'll simply clear all bits we don't set.
 *
 * @returns VBox status code (ignored).
 * @param   pReq    Pre-allocated VMMDevReq_CtlGuestFilterMask request; filled
 *                  in and performed here.
 * @param   fMask   The new mask.
 */
static int vboxGuestSetFilterMask(VMMDevCtlGuestFilterMask *pReq,
                                  uint32_t fMask)
{
    int rc;

    /* OR in exactly the wanted bits and clear everything else. */
    pReq->u32OrMask = fMask;
    pReq->u32NotMask = ~fMask;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc))
        LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
    return rc;
}
149
150
151/**
152 * Sets the guest capabilities to the host.
153 *
154 * This will ASSUME that we're the ones in charge of the mask, so
155 * we'll simply clear all bits we don't set.
156 *
157 * @returns VBox status code.
158 * @param fMask The new mask.
159 */
160static int vboxGuestSetCapabilities(VMMDevReqGuestCapabilities2 *pReq,
161 uint32_t fMask)
162{
163 int rc;
164
165 pReq->u32OrMask = fMask;
166 pReq->u32NotMask = ~fMask;
167 rc = VbglGRPerform(&pReq->header);
168 if (RT_FAILURE(rc))
169 LogRelFunc(("failed with rc=%Rrc\n", rc));
170 return rc;
171}
172
173
174/**
175 * Sets the mouse status to the host.
176 *
177 * This will ASSUME that we're the ones in charge of the mask, so
178 * we'll simply clear all bits we don't set.
179 *
180 * @returns VBox status code.
181 * @param fMask The new mask.
182 */
183static int vboxGuestSetMouseStatus(VMMDevReqMouseStatus *pReq, uint32_t fMask)
184{
185 int rc;
186
187 pReq->mouseFeatures = fMask;
188 pReq->pointerXPos = 0;
189 pReq->pointerYPos = 0;
190 rc = VbglGRPerform(&pReq->header);
191 if (RT_FAILURE(rc))
192 LogRelFunc(("failed with rc=%Rrc\n", rc));
193 return rc;
194}
195
196
/** Host flags to be updated by a given invocation of the
 * vboxGuestUpdateHostFlags() method. */
enum
{
    HostFlags_FilterMask   = 1,   /* Update the event filter mask. */
    HostFlags_Capabilities = 2,   /* Update the guest capabilities. */
    HostFlags_MouseStatus  = 4,   /* Update the mouse status flags. */
    HostFlags_All          = 7,   /* All of the above. */
    HostFlags_SizeHack = (unsigned)-1  /* Force the enum to full integer width. */
};
207
208
209static int vboxGuestGetHostFlagsFromSessions(PVBOXGUESTDEVEXT pDevExt,
210 PVBOXGUESTSESSION pSession,
211 uint32_t *pfFilterMask,
212 uint32_t *pfCapabilities,
213 uint32_t *pfMouseStatus)
214{
215 PVBOXGUESTSESSION pIterator;
216 uint32_t fFilterMask = 0, fCapabilities = 0, fMouseStatus = 0;
217 unsigned cSessions = 0;
218 int rc = VINF_SUCCESS;
219
220 RTListForEach(&pDevExt->SessionList, pIterator, VBOXGUESTSESSION, ListNode)
221 {
222 fFilterMask |= pIterator->fFilterMask;
223 fCapabilities |= pIterator->fCapabilities;
224 fMouseStatus |= pIterator->fMouseStatus;
225 ++cSessions;
226 }
227 if (!cSessions)
228 if (fFilterMask | fCapabilities | fMouseStatus)
229 rc = VERR_INTERNAL_ERROR;
230 if (cSessions == 1 && pSession)
231 if ( fFilterMask != pSession->fFilterMask
232 || fCapabilities != pSession->fCapabilities
233 || fMouseStatus != pSession->fMouseStatus)
234 rc = VERR_INTERNAL_ERROR;
235 if (cSessions > 1 && pSession)
236 if ( ~fFilterMask & pSession->fFilterMask
237 || ~fCapabilities & pSession->fCapabilities
238 || ~fMouseStatus & pSession->fMouseStatus)
239 rc = VERR_INTERNAL_ERROR;
240 *pfFilterMask = fFilterMask;
241 *pfCapabilities = fCapabilities;
242 *pfMouseStatus = fMouseStatus;
243 return rc;
244}
245
246
/** Check which host flags in a given category are being asserted by some guest
 * session and assert exactly those on the host which are being asserted by one
 * or more sessions. pCallingSession is purely for sanity checking and can be
 * NULL.
 * @note Takes the session spin-lock.
 */
static int vboxGuestUpdateHostFlags(PVBOXGUESTDEVEXT pDevExt,
                                    PVBOXGUESTSESSION pSession,
                                    unsigned enmFlags)
{
    int rc;
    VMMDevCtlGuestFilterMask *pFilterReq = NULL;
    VMMDevReqGuestCapabilities2 *pCapabilitiesReq = NULL;
    VMMDevReqMouseStatus *pStatusReq = NULL;
    uint32_t fFilterMask = 0, fCapabilities = 0, fMouseStatus = 0;

    /* Allocate all three VMMDev requests up front, before taking the
       spin-lock; allocation stops at the first failure. */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pFilterReq, sizeof(*pFilterReq),
                     VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pCapabilitiesReq,
                         sizeof(*pCapabilitiesReq),
                         VMMDevReq_SetGuestCapabilities);
    if (RT_SUCCESS(rc))
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pStatusReq,
                         sizeof(*pStatusReq), VMMDevReq_SetMouseStatus);
    /* The spin-lock is taken unconditionally so the release below always
       pairs with it, even when an allocation above failed. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (RT_SUCCESS(rc))
        rc = vboxGuestGetHostFlagsFromSessions(pDevExt, pSession, &fFilterMask,
                                               &fCapabilities, &fMouseStatus);
    if (RT_SUCCESS(rc))
    {
        fFilterMask |= pDevExt->fFixedEvents;
        /* Since VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR is inverted in the session
         * capabilities we invert it again before sending it to the host. */
        fMouseStatus ^= VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
        /* NOTE(review): the VbglGRPerform calls inside the vboxGuestSet*
         * helpers below execute while the session spin-lock is held --
         * confirm this is intended/safe on all supported platforms. */
        if (enmFlags & HostFlags_FilterMask)
            vboxGuestSetFilterMask(pFilterReq, fFilterMask);
        fCapabilities |= pDevExt->u32GuestCaps;
        if (enmFlags & HostFlags_Capabilities)
            vboxGuestSetCapabilities(pCapabilitiesReq, fCapabilities);
        if (enmFlags & HostFlags_MouseStatus)
            vboxGuestSetMouseStatus(pStatusReq, fMouseStatus);
    }
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    /* Free whichever requests were successfully allocated. */
    if (pFilterReq)
        VbglGRFree(&pFilterReq->header);
    if (pCapabilitiesReq)
        VbglGRFree(&pCapabilitiesReq->header);
    if (pStatusReq)
        VbglGRFree(&pStatusReq->header);
    return rc;
}
299
300
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the trailing array of
 * physical page addresses covering one balloon chunk. */
static const uint32_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
305
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL
};
#endif  /* RT_OS_DARWIN || RT_OS_SOLARIS */
327
328
329/**
330 * Reserves memory in which the VMM can relocate any guest mappings
331 * that are floating around.
332 *
333 * This operation is a little bit tricky since the VMM might not accept
334 * just any address because of address clashes between the three contexts
335 * it operates in, so use a small stack to perform this operation.
336 *
337 * @returns VBox status code (ignored).
338 * @param pDevExt The device extension.
339 */
340static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
341{
342 /*
343 * Query the required space.
344 */
345 VMMDevReqHypervisorInfo *pReq;
346 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
347 if (RT_FAILURE(rc))
348 return rc;
349 pReq->hypervisorStart = 0;
350 pReq->hypervisorSize = 0;
351 rc = VbglGRPerform(&pReq->header);
352 if (RT_FAILURE(rc)) /* this shouldn't happen! */
353 {
354 VbglGRFree(&pReq->header);
355 return rc;
356 }
357
358 /*
359 * The VMM will report back if there is nothing it wants to map, like for
360 * instance in VT-x and AMD-V mode.
361 */
362 if (pReq->hypervisorSize == 0)
363 LogFlowFunc(("Nothing to do\n"));
364 else
365 {
366 /*
367 * We have to try several times since the host can be picky
368 * about certain addresses.
369 */
370 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
371 uint32_t cbHypervisor = pReq->hypervisorSize;
372 RTR0MEMOBJ ahTries[5];
373 uint32_t iTry;
374 bool fBitched = false;
375 LogFlowFunc(("cbHypervisor=%#x\n", cbHypervisor));
376 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
377 {
378 /*
379 * Reserve space, or if that isn't supported, create a object for
380 * some fictive physical memory and map that in to kernel space.
381 *
382 * To make the code a bit uglier, most systems cannot help with
383 * 4MB alignment, so we have to deal with that in addition to
384 * having two ways of getting the memory.
385 */
386 uint32_t uAlignment = _4M;
387 RTR0MEMOBJ hObj;
388 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
389 if (rc == VERR_NOT_SUPPORTED)
390 {
391 uAlignment = PAGE_SIZE;
392 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
393 }
394 /*
395 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
396 * not implemented at all at the current platform, try to map the memory object into the
397 * virtual kernel space.
398 */
399 if (rc == VERR_NOT_SUPPORTED)
400 {
401 if (hFictive == NIL_RTR0MEMOBJ)
402 {
403 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
404 if (RT_FAILURE(rc))
405 break;
406 hFictive = hObj;
407 }
408 uAlignment = _4M;
409 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
410 if (rc == VERR_NOT_SUPPORTED)
411 {
412 uAlignment = PAGE_SIZE;
413 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
414 }
415 }
416 if (RT_FAILURE(rc))
417 {
418 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
419 rc, cbHypervisor, uAlignment, iTry));
420 fBitched = true;
421 break;
422 }
423
424 /*
425 * Try set it.
426 */
427 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
428 pReq->header.rc = VERR_INTERNAL_ERROR;
429 pReq->hypervisorSize = cbHypervisor;
430 pReq->hypervisorStart = (RTGCPTR32)(uintptr_t)RTR0MemObjAddress(hObj);
431 if ( uAlignment == PAGE_SIZE
432 && pReq->hypervisorStart & (_4M - 1))
433 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
434 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
435
436 rc = VbglGRPerform(&pReq->header);
437 if (RT_SUCCESS(rc))
438 {
439 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
440 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
441 RTR0MemObjAddress(pDevExt->hGuestMappings),
442 RTR0MemObjSize(pDevExt->hGuestMappings),
443 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
444 break;
445 }
446 ahTries[iTry] = hObj;
447 }
448
449 /*
450 * Cleanup failed attempts.
451 */
452 while (iTry-- > 0)
453 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
454 if ( RT_FAILURE(rc)
455 && hFictive != NIL_RTR0PTR)
456 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
457 if (RT_FAILURE(rc) && !fBitched)
458 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
459 }
460 VbglGRFree(&pReq->header);
461
462 /*
463 * We ignore failed attempts for now.
464 */
465 return VINF_SUCCESS;
466}
467
468
469/**
470 * Undo what vboxGuestInitFixateGuestMappings did.
471 *
472 * @param pDevExt The device extension.
473 */
474static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
475{
476 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
477 {
478 /*
479 * Tell the host that we're going to free the memory we reserved for
480 * it, the free it up. (Leak the memory if anything goes wrong here.)
481 */
482 VMMDevReqHypervisorInfo *pReq;
483 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
484 if (RT_SUCCESS(rc))
485 {
486 pReq->hypervisorStart = 0;
487 pReq->hypervisorSize = 0;
488 rc = VbglGRPerform(&pReq->header);
489 VbglGRFree(&pReq->header);
490 }
491 if (RT_SUCCESS(rc))
492 {
493 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
494 AssertRC(rc);
495 }
496 else
497 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
498
499 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
500 }
501}
502
503
504/**
505 * Inflate the balloon by one chunk represented by an R0 memory object.
506 *
507 * The caller owns the balloon mutex.
508 *
509 * @returns IPRT status code.
510 * @param pMemObj Pointer to the R0 memory object.
511 * @param pReq The pre-allocated request for performing the VMMDev call.
512 */
513static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
514{
515 uint32_t iPage;
516 int rc;
517
518 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
519 {
520 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
521 pReq->aPhysPage[iPage] = phys;
522 }
523
524 pReq->fInflate = true;
525 pReq->header.size = cbChangeMemBalloonReq;
526 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
527
528 rc = VbglGRPerform(&pReq->header);
529 if (RT_FAILURE(rc))
530 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
531 return rc;
532}
533
534
535/**
536 * Deflate the balloon by one chunk - info the host and free the memory object.
537 *
538 * The caller owns the balloon mutex.
539 *
540 * @returns IPRT status code.
541 * @param pMemObj Pointer to the R0 memory object.
542 * The memory object will be freed afterwards.
543 * @param pReq The pre-allocated request for performing the VMMDev call.
544 */
545static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
546{
547 uint32_t iPage;
548 int rc;
549
550 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
551 {
552 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
553 pReq->aPhysPage[iPage] = phys;
554 }
555
556 pReq->fInflate = false;
557 pReq->header.size = cbChangeMemBalloonReq;
558 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
559
560 rc = VbglGRPerform(&pReq->header);
561 if (RT_FAILURE(rc))
562 {
563 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
564 return rc;
565 }
566
567 rc = RTR0MemObjFree(*pMemObj, true);
568 if (RT_FAILURE(rc))
569 {
570 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
571 return rc;
572 }
573
574 *pMemObj = NIL_RTR0MEMOBJ;
575 return VINF_SUCCESS;
576}
577
578
579/**
580 * Inflate/deflate the memory balloon and notify the host.
581 *
582 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
583 * the mutex.
584 *
585 * @returns VBox status code.
586 * @param pDevExt The device extension.
587 * @param pSession The session.
588 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
589 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
590 * (VINF_SUCCESS if set).
591 */
592static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
593{
594 int rc = VINF_SUCCESS;
595
596 if (pDevExt->MemBalloon.fUseKernelAPI)
597 {
598 VMMDevChangeMemBalloon *pReq;
599 uint32_t i;
600
601 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
602 {
603 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
604 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
605 return VERR_INVALID_PARAMETER;
606 }
607
608 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
609 return VINF_SUCCESS; /* nothing to do */
610
611 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
612 && !pDevExt->MemBalloon.paMemObj)
613 {
614 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
615 if (!pDevExt->MemBalloon.paMemObj)
616 {
617 LogRel(("vboxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
618 return VERR_NO_MEMORY;
619 }
620 }
621
622 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
623 if (RT_FAILURE(rc))
624 return rc;
625
626 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
627 {
628 /* inflate */
629 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
630 {
631 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
632 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
633 if (RT_FAILURE(rc))
634 {
635 if (rc == VERR_NOT_SUPPORTED)
636 {
637 /* not supported -- fall back to the R3-allocated memory. */
638 rc = VINF_SUCCESS;
639 pDevExt->MemBalloon.fUseKernelAPI = false;
640 Assert(pDevExt->MemBalloon.cChunks == 0);
641 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
642 }
643 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
644 * cannot allocate more memory => don't try further, just stop here */
645 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
646 break;
647 }
648
649 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
650 if (RT_FAILURE(rc))
651 {
652 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
653 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
654 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
655 break;
656 }
657 pDevExt->MemBalloon.cChunks++;
658 }
659 }
660 else
661 {
662 /* deflate */
663 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
664 {
665 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
666 if (RT_FAILURE(rc))
667 {
668 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
669 break;
670 }
671 pDevExt->MemBalloon.cChunks--;
672 }
673 }
674
675 VbglGRFree(&pReq->header);
676 }
677
678 /*
679 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
680 * the balloon changes via the other API.
681 */
682 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
683
684 return rc;
685}
686
687
688/**
689 * Sends heartbeat to host.
690 *
691 * @returns VBox status code.
692 */
693static int VBoxGuestHeartbeatSend(PVBOXGUESTDEVEXT pDevExt)
694{
695 int rc;
696 if (pDevExt->pReqGuestHeartbeat)
697 {
698 rc = VbglGRPerform(pDevExt->pReqGuestHeartbeat);
699 Log(("VBoxGuestHeartbeatSend: VbglGRPerform VBoxGuestHeartbeatSend completed with rc=%Rrc\n", rc));
700 }
701 else
702 rc = VERR_INVALID_STATE;
703 return rc;
704}
705
706
707/**
708 * Configure the host to check guest's heartbeat
709 * and get heartbeat interval from the host.
710 *
711 * @returns VBox status code.
712 * @param pDevExt The device extension.
713 * @param fEnabled Set true to enable guest heartbeat checks on host.
714 */
715static int VBoxGuestHeartbeatHostConfigure(PVBOXGUESTDEVEXT pDevExt, bool fEnabled)
716{
717 VMMDevReqHeartbeat *pReq;
718 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_HeartbeatConfigure);
719 Log(("VBoxGuestHeartbeatHostConfigure: VbglGRAlloc VBoxGuestHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
720 if (RT_SUCCESS(rc))
721 {
722 pReq->fEnabled = fEnabled;
723 pReq->cNsInterval = 0;
724 rc = VbglGRPerform(&pReq->header);
725 Log(("VBoxGuestHeartbeatHostConfigure: VbglGRPerform VBoxGuestHeartbeatHostConfigure completed with rc=%Rrc\n", rc));
726 pDevExt->cNsHeartbeatInterval = pReq->cNsInterval;
727 VbglGRFree(&pReq->header);
728 }
729 return rc;
730}
731
732
733/**
734 * Callback for heartbeat timer.
735 */
736static DECLCALLBACK(void) VBoxGuestHeartbeatTimerHandler(PRTTIMER p1, void *pvUser, uint64_t p3)
737{
738 NOREF(p1);
739 NOREF(p3);
740
741 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
742 if (!pDevExt)
743 return;
744
745 int rc = VBoxGuestHeartbeatSend(pDevExt);
746 if (RT_FAILURE(rc))
747 {
748 Log(("HB Timer: VBoxGuestHeartbeatSend terminated with rc=%Rrc\n", rc));
749 }
750}
751
752
753/**
754 * Helper to reinit the VBoxVMM communication after hibernation.
755 *
756 * @returns VBox status code.
757 * @param pDevExt The device extension.
758 * @param enmOSType The OS type.
759 */
760int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
761{
762 int rc = VBoxGuestReportGuestInfo(enmOSType);
763 if (RT_SUCCESS(rc))
764 {
765 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
766 if (RT_FAILURE(rc))
767 LogFlowFunc(("Could not report guest driver status, rc=%Rrc\n", rc));
768 }
769 else
770 LogFlowFunc(("Could not report guest information to host, rc=%Rrc\n", rc));
771
772 LogFlowFunc(("Returned with rc=%Rrc\n", rc));
773 return rc;
774}
775
776
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 * NOTE(review): no mutex is acquired in this function itself; presumably the
 * caller holds MemBalloon.hMtx -- confirm against the IOCtl path.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (currently unused here).
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject inflating past the maximum, or before the max was queried. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the chunk handle array, marking all slots free. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the ring-3 chunk in memory before handing it to the host. */
        rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Inflation failed: unpin the chunk and free its slot. */
                LogFlowFunc(("Inflating failed, rc=%Rrc\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            LogFlowFunc(("Deflating failed, rc=%Rrc\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
893
894
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a driver unload) may tear the balloon down. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate chunk by chunk, highest index first. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        /* Stop on the first failure; remaining chunks stay pinned. */
                        LogRelFunc(("Deflating balloon failed with rc=%Rrc; will leak %u chunks\n",
                                    rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRelFunc(("Failed to allocate VMMDev request buffer, rc=%Rrc; will leak %u chunks\n",
                            rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
942
943
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    /*
     * Create the release log.
     */
    static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
    RTUINT fFlags = RTLOGFLAGS_PREFIX_TIME | RTLOGFLAGS_PREFIX_TID
                  | RTLOGFLAGS_PREFIX_THREAD | RTLOGFLAGS_PREFIX_TIME_PROG;
    PRTLOGGER pRelLogger;
    rc = RTLogCreate(&pRelLogger, fFlags, "all",
#ifdef DEBUG
                     "VBOXGUEST_LOG",
#else
                     "VBOXGUEST_RELEASE_LOG",
#endif
                     RT_ELEMENTS(s_apszGroups), s_apszGroups,
                     RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
    if (RT_SUCCESS(rc))
    {
        RTLogRelSetDefaultInstance(pRelLogger);

        /* Explicitly flush the log in case of VBOXGUEST_RELEASE_LOG=buffered. */
        RTLogFlush(pRelLogger);
    }
    /* Note: a logger creation failure is deliberately non-fatal; init continues. */
    /** @todo Add native hook for getting logger config parameters and setting
     *        them. On Linux we use the module parameter stuff (see vboxguestLinuxModInit). */
#endif

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     *
     * Every member is set to a known state up front so the failure paths
     * below (and VBoxGuestDeleteDevExt) can safely clean up partial state.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    RTListInit(&pDevExt->SessionList);
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    pDevExt->MouseNotifyCallback.pfnNotify = NULL;
    pDevExt->MouseNotifyCallback.pvUser = NULL;
    pDevExt->pReqGuestHeartbeat = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            LogFlowFunc(("VMMDevMemory: mapping=%p size=%#RX32 (%#RX32), version=%#RX32\n",
                         pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRelFunc(("Bogus VMMDev memory; u32Version=%RX32 (expected %RX32), u32Size=%RX32 (expected <= %RX32)\n",
                        pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    pDevExt->u32AcquireModeGuestCaps = 0;
    pDevExt->u32SetModeGuestCaps = 0;
    pDevExt->u32GuestCaps = 0;

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        /* Only the event spinlock can be non-NIL here; the session spinlock
           creation either failed or was never attempted. */
        LogRelFunc(("Failed to create spinlock, rc=%Rrc\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRelFunc(("Failed to create mutex, rc=%Rrc\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Preallocated IRQ-acknowledge request; the ISR hands its physical
           address to the host, so it must live for the driver's lifetime. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                /* Set the fixed event and disable the guest graphics capability
                 * by default. The guest specific graphics driver will re-enable
                 * the graphics capability if and when appropriate. */
                rc = vboxGuestUpdateHostFlags(pDevExt, NULL,
                                              HostFlags_FilterMask
                                              | HostFlags_Capabilities);
                if (RT_SUCCESS(rc))
                {
                    vboxGuestInitFixateGuestMappings(pDevExt);

                    rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                    if (RT_FAILURE(rc))
                        LogRelFunc(("VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                    /** @todo Move heartbeat initialization into a separate function. */
                    /* Make sure that heartbeat checking is disabled. */
                    rc = VBoxGuestHeartbeatHostConfigure(pDevExt, false);
                    if (RT_SUCCESS(rc))
                    {
                        /* Re-enable; the disable/enable pair establishes a known state. */
                        rc = VBoxGuestHeartbeatHostConfigure(pDevExt, true);
                        if (RT_SUCCESS(rc))
                        {
                            /* Preallocate the request to use it from the timer callback because:
                             *    1) on Windows VbglGRAlloc must be called at IRQL <= APC_LEVEL
                             *       and the timer callback runs at DISPATCH_LEVEL;
                             *    2) avoid repeated allocations.
                             */
                            rc = VbglGRAlloc(&pDevExt->pReqGuestHeartbeat, sizeof(*pDevExt->pReqGuestHeartbeat), VMMDevReq_GuestHeartbeat);
                            if (RT_FAILURE(rc))
                                LogRelFunc(("VbglGRAlloc(VMMDevReq_GuestHeartbeat) %Rrc\n", rc));

                            if (RT_SUCCESS(rc))
                            {
                                LogFlowFunc(("Setting up heartbeat to trigger every %RU64 sec\n", pDevExt->cNsHeartbeatInterval / 1000000000));
                                rc = RTTimerCreateEx(&pDevExt->pHeartbeatTimer, pDevExt->cNsHeartbeatInterval,
                                                     0, (PFNRTTIMER)VBoxGuestHeartbeatTimerHandler, pDevExt);
                            }

                            if (RT_SUCCESS(rc))
                            {
                                rc = RTTimerStart(pDevExt->pHeartbeatTimer, 0);
                                if (RT_FAILURE(rc))
                                    LogRelFunc(("Heartbeat timer failed to start, rc=%Rrc\n", rc));
                            }
                            if (RT_FAILURE(rc))
                            {
                                LogRelFunc(("Failed to set up the timer, guest heartbeat is disabled\n"));
                                /* Disable host heartbeat check if we failed */
                                VBoxGuestHeartbeatHostConfigure(pDevExt, false);

                                VbglGRFree(pDevExt->pReqGuestHeartbeat);
                                pDevExt->pReqGuestHeartbeat = NULL;
                            }
                        }
                        else
                            LogRelFunc(("Failed to configure host for heartbeat checking, rc=%Rrc\n", rc));
                    }

                    /* Note: heartbeat failures are non-fatal; init still succeeds. */
                    LogFlowFunc(("VBoxGuestInitDevExt: returns success\n"));
                    return VINF_SUCCESS;
                }
                else
                    LogRelFunc(("Failed to set host flags, rc=%Rrc\n", rc));
            }
            else
                LogRelFunc(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRelFunc(("VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRelFunc(("VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: tear down everything created above in reverse order. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif
    return rc; /* (failed) */
}
1188
1189
1190/**
1191 * Deletes all the items in a wait chain.
1192 * @param pList The head of the chain.
1193 */
1194static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
1195{
1196 while (!RTListIsEmpty(pList))
1197 {
1198 int rc2;
1199 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
1200 RTListNodeRemove(&pWait->ListNode);
1201
1202 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
1203 pWait->Event = NIL_RTSEMEVENTMULTI;
1204 pWait->pSession = NULL;
1205 RTMemFree(pWait);
1206 }
1207}
1208
1209
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is loaded,
 * but don't call this on shutdown.
 *
 * Order matters: host-visible state (heartbeat, guest mappings, host
 * flags, balloon) is undone first, then local resources are destroyed.
 *
 * @param   pDevExt         The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Stop and destroy HB timer and
     * disable host heartbeat checking.
     */
    if (pDevExt->pHeartbeatTimer)
    {
        RTTimerDestroy(pDevExt->pHeartbeatTimer);
        VBoxGuestHeartbeatHostConfigure(pDevExt, false);
    }

    /* VbglGRFree tolerates the request pointer being NULL. */
    VbglGRFree(pDevExt->pReqGuestHeartbeat);
    pDevExt->pReqGuestHeartbeat = NULL;

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    if (!RTListIsEmpty(&pDevExt->SessionList))
    {
        /* Sessions should all be closed by now; reset the list so the
           wait list cleanup below doesn't trip over stale entries. */
        LogRelFunc(("session list not empty!\n"));
        RTListInit(&pDevExt->SessionList);
    }
    /* Update the host flags (mouse status etc) not to reflect this session. */
    pDevExt->fFixedEvents = 0;
    vboxGuestUpdateHostFlags(pDevExt, NULL, HostFlags_All);
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;

#ifdef VBOX_GUESTDRV_WITH_RELEASE_LOGGER
    RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
    RTLogDestroy(RTLogSetDefaultInstance(NULL));
#endif

}
1281
1282
1283/**
1284 * Creates a VBoxGuest user session.
1285 *
1286 * The native code calls this when a ring-3 client opens the device.
1287 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
1288 *
1289 * @returns VBox status code.
1290 * @param pDevExt The device extension.
1291 * @param ppSession Where to store the session on success.
1292 */
1293int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1294{
1295 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1296 if (RT_UNLIKELY(!pSession))
1297 {
1298 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
1299 return VERR_NO_MEMORY;
1300 }
1301
1302 pSession->Process = RTProcSelf();
1303 pSession->R0Process = RTR0ProcHandleSelf();
1304 pSession->pDevExt = pDevExt;
1305 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1306 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1307 RTSpinlockRelease(pDevExt->SessionSpinlock);
1308
1309 *ppSession = pSession;
1310 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1311 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1312 return VINF_SUCCESS;
1313}
1314
1315
1316/**
1317 * Creates a VBoxGuest kernel session.
1318 *
1319 * The native code calls this when a ring-0 client connects to the device.
1320 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
1321 *
1322 * @returns VBox status code.
1323 * @param pDevExt The device extension.
1324 * @param ppSession Where to store the session on success.
1325 */
1326int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1327{
1328 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1329 if (RT_UNLIKELY(!pSession))
1330 {
1331 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
1332 return VERR_NO_MEMORY;
1333 }
1334
1335 pSession->Process = NIL_RTPROCESS;
1336 pSession->R0Process = NIL_RTR0PROCESS;
1337 pSession->pDevExt = pDevExt;
1338 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1339 RTListAppend(&pDevExt->SessionList, &pSession->ListNode);
1340 RTSpinlockRelease(pDevExt->SessionSpinlock);
1341
1342 *ppSession = pSession;
1343 LogFlowFunc(("pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1344 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1345 return VINF_SUCCESS;
1346}
1347
1348static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
1349
/**
 * Closes a VBoxGuest session.
 *
 * Unregisters the session, releases acquired capabilities, cancels any
 * waits, disconnects its HGCM clients, closes its balloon and frees it.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    LogFlowFunc(("pSession=%p proc=%RTproc (%d) r0proc=%p\n",
                 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    RTListNodeRemove(&pSession->ListNode);
    RTSpinlockRelease(pDevExt->SessionSpinlock);
    /* Release all capabilities this session acquired (0 or-mask, all-bits not-mask). */
    VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE);

    VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM client this session still has open. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            LogFlowFunc(("Disconnecting client ID=%#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
    /* Update the host flags (mouse status etc) not to reflect this session. */
    /* NOTE(review): on Windows the mouse status flag is deliberately left
       out of the recalculation here — confirm against the Windows native code. */
    vboxGuestUpdateHostFlags(pDevExt, NULL, HostFlags_All
#ifdef RT_OS_WINDOWS
                             & (~HostFlags_MouseStatus)
#endif
                             );
}
1394
1395
/**
 * Allocates a wait-for-event entry.
 *
 * Tries to recycle an entry from the device extension's free list first
 * and only falls back to a fresh heap allocation (plus a new event
 * semaphore) when none is available.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek first: skips the spinlock entirely when the free list
       is empty.  The result is only a hint and is re-checked under the lock. */
    PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
    if (pWait)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);

        /* Re-fetch under the lock; another thread may have taken it. */
        pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
        if (pWait)
            RTListNodeRemove(&pWait->ListNode);

        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
    if (!pWait)
    {
        /* Rate-limit the release-log complaints to the first 32 failures. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRelFunc(("Out of memory, returning NULL\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRelFunc(("RTSemEventMultiCreate failed with rc=%Rrc\n", rc));
            RTMemFree(pWait);
            return NULL;
        }

        pWait->ListNode.pNext = NULL;
        pWait->ListNode.pPrev = NULL;
    }

    /*
     * Zero members just as an precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    pWait->fPendingWakeUp = false;
    pWait->fFreeMe = false;
#endif
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Recycled entries may have a signalled semaphore; reset it. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1461
1462
/**
 * Frees the wait-for-event entry.
 *
 * The caller must own the wait spinlock !
 * The entry must be in a list!
 *
 * The entry is returned to the device extension's free list for reuse.
 * With deferred wake-up, an entry that is currently being signalled
 * (fPendingWakeUp) is only flagged for freeing; VBoxGuestWaitDoWakeUps
 * completes the move to the free list once the signal is done.
 *
 * @param   pDevExt         The device extension.
 * @param   pWait           The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    Assert(!pWait->fFreeMe);
    if (pWait->fPendingWakeUp)
        /* Being signalled right now; defer the actual freeing. */
        pWait->fFreeMe = true;
    else
#endif
    {
        RTListNodeRemove(&pWait->ListNode);
        RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
    }
}
1490
1491
1492/**
1493 * Frees the wait-for-event entry.
1494 *
1495 * @param pDevExt The device extension.
1496 * @param pWait The wait-for-event entry to free.
1497 */
1498static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1499{
1500 RTSpinlockAcquire(pDevExt->EventSpinlock);
1501 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1502 RTSpinlockRelease(pDevExt->EventSpinlock);
1503}
1504
1505
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 *
 * The spinlock is dropped while signalling each semaphore (the signal
 * call must not be made with the interrupt-safe spinlock held); the
 * fPendingWakeUp flag tells concurrent VBoxGuestWaitFreeLocked calls to
 * defer freeing the entry until we are done with it.
 *
 * @param   pDevExt         The device extension.
 */
void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness peek: cheap fast path when there is nothing to do. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int            rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            pWait->fPendingWakeUp = true;
            RTSpinlockRelease(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* Someone tried to free the entry while we had the lock
                   dropped; complete that free now. */
                pWait->fFreeMe = false;
                VBoxGuestWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1549
1550
1551/**
1552 * Modifies the guest capabilities.
1553 *
1554 * Should be called during driver init and termination.
1555 *
1556 * @returns VBox status code.
1557 * @param fOr The Or mask (what to enable).
1558 * @param fNot The Not mask (what to disable).
1559 */
1560int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1561{
1562 VMMDevReqGuestCapabilities2 *pReq;
1563 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1564 if (RT_FAILURE(rc))
1565 {
1566 LogFlowFunc(("Failed to allocate %u (%#x) bytes to cache the request; rc=%Rrc\n",
1567 sizeof(*pReq), sizeof(*pReq), rc));
1568 return rc;
1569 }
1570
1571 pReq->u32OrMask = fOr;
1572 pReq->u32NotMask = fNot;
1573
1574 rc = VbglGRPerform(&pReq->header);
1575 if (RT_FAILURE(rc))
1576 LogFlowFunc(("VbglGRPerform failed, rc=%Rrc\n", rc));
1577
1578 VbglGRFree(&pReq->header);
1579 return rc;
1580}
1581
1582
1583/**
1584 * Implements the fast (no input or output) type of IOCtls.
1585 *
1586 * This is currently just a placeholder stub inherited from the support driver code.
1587 *
1588 * @returns VBox status code.
1589 * @param iFunction The IOCtl function number.
1590 * @param pDevExt The device extension.
1591 * @param pSession The session.
1592 */
1593int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1594{
1595 LogFlowFunc(("iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1596
1597 NOREF(iFunction);
1598 NOREF(pDevExt);
1599 NOREF(pSession);
1600 return VERR_NOT_SUPPORTED;
1601}
1602
1603
1604/**
1605 * Return the VMM device port.
1606 *
1607 * returns IPRT status code.
1608 * @param pDevExt The device extension.
1609 * @param pInfo The request info.
1610 * @param pcbDataReturned (out) contains the number of bytes to return.
1611 */
1612static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1613{
1614 LogFlowFuncEnter();
1615
1616 pInfo->portAddress = pDevExt->IOPortBase;
1617 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1618 if (pcbDataReturned)
1619 *pcbDataReturned = sizeof(*pInfo);
1620 return VINF_SUCCESS;
1621}
1622
1623
1624#ifndef RT_OS_WINDOWS
1625/**
1626 * Set the callback for the kernel mouse handler.
1627 *
1628 * returns IPRT status code.
1629 * @param pDevExt The device extension.
1630 * @param pNotify The new callback information.
1631 */
1632int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1633{
1634 LogFlowFuncEnter();
1635
1636 RTSpinlockAcquire(pDevExt->EventSpinlock);
1637 pDevExt->MouseNotifyCallback = *pNotify;
1638 RTSpinlockRelease(pDevExt->EventSpinlock);
1639 return VINF_SUCCESS;
1640}
1641#endif
1642
1643
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 *
 * The caller enters the spinlock, we leave it.
 *
 * Checks whether any of the requested events are already pending (or a
 * cancel is flagged for the session) and, if so, fills in the output
 * structure.  The spinlock is released on BOTH paths.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 *          VERR_TIMEOUT if the caller should go to sleep.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents)
{
    uint32_t fMatches = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
    if (fMatches || pSession->fPendingCancelWaitEvents)
    {
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Log the event index only when exactly one event bit was requested. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlowFunc(("WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            LogFlowFunc(("WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        /* A pending cancel is consumed by this (possibly empty) wake-up. */
        pSession->fPendingCancelWaitEvents = false;
        return VINF_SUCCESS;
    }

    RTSpinlockRelease(pDevExt->EventSpinlock);
    return VERR_TIMEOUT;
}
1672
1673
/**
 * Handles the WAITEVENT I/O control: wait for any of the requested host
 * events to be posted, subject to an optional timeout.
 *
 * @returns VBox status code (also reflected in pInfo->u32Result).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               In/out request structure (mask, timeout, result).
 * @param   pcbDataReturned     Where to return the output size. Optional.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    /* iEvent is the index of the lowest set bit; -1 means an empty mask. */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        LogRel(("Invalid input mask %#x\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    /* WaitEventCheckCondition releases the spinlock on both paths. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll: report timeout right away. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        LogFlowFunc(("Returning VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockRelease(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     */
    /* fResEvents == UINT32_MAX is the cancellation marker set by
       VBoxGuestCommonIOCtl_CancelAllWaitEvents. */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            LogFlowFunc(("Returning %#x\n", pInfo->u32EventFlagsOut));
        else
            LogFlowFunc(("Returning %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        LogFlowFunc(("Returning VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        LogFlowFunc(("Returning VERR_TIMEOUT (2)\n"));
    }
    else
    {
        /* Successful wait but no events recorded is an internal error. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRelFunc(("Returning %Rrc but no events\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        LogFlowFunc(("Returning %Rrc\n", rc));
    }

    return rc;
}
1796
1797
/**
 * Cancels all waits belonging to the given session.
 *
 * Every matching wait entry is marked cancelled (fResEvents = UINT32_MAX)
 * and woken up.  If no wait was in progress, a pending-cancel flag is set
 * on the session so the next WAITEVENT returns immediately.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT          pWait;
    PVBOXGUESTWAIT          pSafe;
    int                     rc = 0;
    /* Was as least one WAITEVENT in process for this session?  If not we
     * set a flag that the next call should be interrupted immediately.  This
     * is needed so that a user thread can reliably interrupt another one in a
     * WAITEVENT loop. */
    bool                    fCancelledOne = false;

    LogFlowFunc(("CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            fCancelledOne = true;
            /* UINT32_MAX is the cancellation marker checked by the waiter. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Signalling is deferred to VBoxGuestWaitDoWakeUps below. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    if (!fCancelledOne)
        pSession->fPendingCancelWaitEvents = true;
    RTSpinlockRelease(pDevExt->EventSpinlock);
    Assert(rc == 0);
    NOREF(rc);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1842
/**
 * Checks if the VMM request is allowed in the context of the given session.
 *
 * Classifies the request type into a required privilege level and compares
 * it against the session (kernel sessions are identified by a NIL ring-0
 * process handle).
 *
 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
 * @param   pSession        The calling session.
 * @param   enmType         The request type.
 * @param   pReqHdr         The request.
 */
static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
                                         VMMDevRequestHeader const *pReqHdr)
{
    /*
     * Categorize the request being made.
     */
    /** @todo This need quite some more work! */
    enum
    {
        kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
    } enmRequired;
    switch (enmType)
    {
        /*
         * Deny access to anything we don't know or provide specialized I/O controls for.
         */
#ifdef VBOX_WITH_HGCM
        case VMMDevReq_HGCMConnect:
        case VMMDevReq_HGCMDisconnect:
# ifdef VBOX_WITH_64_BITS_GUESTS
        case VMMDevReq_HGCMCall32:
        case VMMDevReq_HGCMCall64:
# else
        case VMMDevReq_HGCMCall:
# endif /* VBOX_WITH_64_BITS_GUESTS */
        case VMMDevReq_HGCMCancel:
        case VMMDevReq_HGCMCancel2:
#endif /* VBOX_WITH_HGCM */
        default:
            enmRequired = kLevel_NoOne;
            break;

        /*
         * There are a few things only this driver can do (and it doesn't use
         * the VMMRequst I/O control route anyway, but whatever).
         */
        case VMMDevReq_ReportGuestInfo:
        case VMMDevReq_ReportGuestInfo2:
        case VMMDevReq_GetHypervisorInfo:
        case VMMDevReq_SetHypervisorInfo:
        case VMMDevReq_RegisterPatchMemory:
        case VMMDevReq_DeregisterPatchMemory:
        case VMMDevReq_GetMemBalloonChangeRequest:
            enmRequired = kLevel_OnlyVBoxGuest;
            break;

        /*
         * Trusted users apps only.
         */
        case VMMDevReq_QueryCredentials:
        case VMMDevReq_ReportCredentialsJudgement:
        case VMMDevReq_RegisterSharedModule:
        case VMMDevReq_UnregisterSharedModule:
        case VMMDevReq_WriteCoreDump:
        case VMMDevReq_GetCpuHotPlugRequest:
        case VMMDevReq_SetCpuHotPlugStatus:
        case VMMDevReq_CheckSharedModules:
        case VMMDevReq_GetPageSharingStatus:
        case VMMDevReq_DebugIsPageShared:
        case VMMDevReq_ReportGuestStats:
        case VMMDevReq_ReportGuestUserState:
        case VMMDevReq_GetStatisticsChangeRequest:
        case VMMDevReq_ChangeMemBalloon:
            enmRequired = kLevel_TrustedUsers;
            break;

        /*
         * Anyone. But not for CapsAcquire mode
         */
        case VMMDevReq_SetGuestCapabilities:
        {
            VMMDevReqGuestCapabilities2 *pCaps = (VMMDevReqGuestCapabilities2*)pReqHdr;
            uint32_t fAcquireCaps = 0;
            /* Setting a capability that is in acquire mode is refused. */
            if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, pCaps->u32OrMask, false, &fAcquireCaps))
            {
                AssertFailed();
                LogRel(("calling caps set for acquired caps %d\n", pCaps->u32OrMask));
                enmRequired = kLevel_NoOne;
                break;
            }
            /* hack to adjust the notcaps.
             * @todo: move to a better place
             * user-mode apps are allowed to pass any mask to the notmask,
             * the driver cleans up them accordingly */
            pCaps->u32NotMask &= ~fAcquireCaps;
            /* do not break, make it fall through to the below enmRequired setting */
        }
        /*
         * Anyone.
         */
        case VMMDevReq_GetMouseStatus:
        case VMMDevReq_SetMouseStatus:
        case VMMDevReq_SetPointerShape:
        case VMMDevReq_GetHostVersion:
        case VMMDevReq_Idle:
        case VMMDevReq_GetHostTime:
        case VMMDevReq_SetPowerStatus:
        case VMMDevReq_AcknowledgeEvents:
        case VMMDevReq_CtlGuestFilterMask:
        case VMMDevReq_ReportGuestStatus:
        case VMMDevReq_GetDisplayChangeRequest:
        case VMMDevReq_VideoModeSupported:
        case VMMDevReq_GetHeightReduction:
        case VMMDevReq_GetDisplayChangeRequest2:
        case VMMDevReq_VideoModeSupported2:
        case VMMDevReq_VideoAccelEnable:
        case VMMDevReq_VideoAccelFlush:
        case VMMDevReq_VideoSetVisibleRegion:
        case VMMDevReq_GetDisplayChangeRequestEx:
        case VMMDevReq_GetSeamlessChangeRequest:
        case VMMDevReq_GetVRDPChangeRequest:
        case VMMDevReq_LogString:
        case VMMDevReq_GetSessionId:
            enmRequired = kLevel_AllUsers;
            break;

        /*
         * Depends on the request parameters...
         */
        /** @todo this have to be changed into an I/O control and the facilities
         *        tracked in the session so they can automatically be failed when the
         *        session terminates without reporting the new status.
         *
         *  The information presented by IGuest is not reliable without this! */
        case VMMDevReq_ReportGuestCapabilities:
            switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
            {
                case VBoxGuestFacilityType_All:
                case VBoxGuestFacilityType_VBoxGuestDriver:
                    enmRequired = kLevel_OnlyVBoxGuest;
                    break;
                case VBoxGuestFacilityType_VBoxService:
                    enmRequired = kLevel_TrustedUsers;
                    break;
                case VBoxGuestFacilityType_VBoxTrayClient:
                case VBoxGuestFacilityType_Seamless:
                case VBoxGuestFacilityType_Graphics:
                default:
                    enmRequired = kLevel_AllUsers;
                    break;
            }
            break;
    }

    /*
     * Check against the session.
     */
    switch (enmRequired)
    {
        default:
        case kLevel_NoOne:
            break;
        case kLevel_OnlyVBoxGuest:
        case kLevel_OnlyKernel:
            /* A NIL ring-0 process handle identifies a kernel session. */
            if (pSession->R0Process == NIL_RTR0PROCESS)
                return VINF_SUCCESS;
            break;
        case kLevel_TrustedUsers:
        case kLevel_AllUsers:
            return VINF_SUCCESS;
    }

    return VERR_PERMISSION_DENIED;
}
2015
/**
 * Handles a generic VMM device request (VBOXGUEST_IOCTL_VMMREQUEST).
 *
 * Validates the request header against the buffer size, checks the session's
 * trust level for the request type, then executes the request via a copy on
 * the physical memory heap and copies the result back to the caller.
 *
 * @returns VBox status code (the VMMDev status is propagated on request
 *          failure).
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request header (start of the in/out buffer).
 * @param   cbData          Size of the buffer @a pReqHdr points to.
 * @param   pcbDataReturned Where to store the amount of returned data.
 *                          Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = (uint32_t)vmmdevGetRequestSize(enmType);

    LogFlowFunc(("Type=%d\n", pReqHdr->requestType));

    /* The declared size must at least cover the type-specific structure... */
    if (cbReq < cbMinSize)
    {
        LogRelFunc(("Invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
                    cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    /* ...and must not claim more than the caller actually supplied. */
    if (cbReq > cbData)
    {
        LogRelFunc(("Invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
                    cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        LogFlowFunc(("Invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc\n",
                     cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce the per-request-type trust level for this session. */
    rc = VBoxGuestCheckIfVMMReqAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        LogFlowFunc(("Operation not allowed! type=%#x, rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        LogFlowFunc(("Failed to allocate %u (%#x) bytes to cache the request; rc=%Rrc\n",
                     cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if ( RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole request (header included) back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        LogFlowFunc(("VbglGRPerform failed; rc=%Rrc\n", rc));
    else
    {
        /* Transport succeeded but the request itself failed; hand the
           VMMDev status code back to the caller. */
        LogFlowFunc(("Request execution failed; VMMDev rc=%Rrc\n",
                     pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
2100
2101
2102static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt,
2103 PVBOXGUESTSESSION pSession,
2104 VBoxGuestFilterMaskInfo *pInfo)
2105{
2106 int rc;
2107
2108 if ((pInfo->u32OrMask | pInfo->u32NotMask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
2109 return VERR_INVALID_PARAMETER;
2110 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2111 pSession->fFilterMask |= pInfo->u32OrMask;
2112 pSession->fFilterMask &= ~pInfo->u32NotMask;
2113 RTSpinlockRelease(pDevExt->SessionSpinlock);
2114 rc = vboxGuestUpdateHostFlags(pDevExt, pSession, HostFlags_FilterMask);
2115 return rc;
2116}
2117
2118
2119static int VBoxGuestCommonIOCtl_SetCapabilities(PVBOXGUESTDEVEXT pDevExt,
2120 PVBOXGUESTSESSION pSession,
2121 VBoxGuestSetCapabilitiesInfo *pInfo)
2122{
2123 int rc;
2124
2125 if ( (pInfo->u32OrMask | pInfo->u32NotMask)
2126 & ~VMMDEV_GUEST_CAPABILITIES_MASK)
2127 return VERR_INVALID_PARAMETER;
2128 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2129 pSession->fCapabilities |= pInfo->u32OrMask;
2130 pSession->fCapabilities &= ~pInfo->u32NotMask;
2131 RTSpinlockRelease(pDevExt->SessionSpinlock);
2132 rc = vboxGuestUpdateHostFlags(pDevExt, pSession, HostFlags_Capabilities);
2133 return rc;
2134}
2135
2136
2137/**
2138 * Sets the mouse status features for this session and updates them
2139 * globally.
2140 *
2141 * @returns VBox status code.
2142 *
2143 * @param pDevExt The device extention.
2144 * @param pSession The session.
2145 * @param fFeatures New bitmap of enabled features.
2146 */
2147static int vboxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt,
2148 PVBOXGUESTSESSION pSession,
2149 uint32_t fFeatures)
2150{
2151 int rc;
2152
2153 if (fFeatures & ~VMMDEV_MOUSE_GUEST_MASK)
2154 return VERR_INVALID_PARAMETER;
2155 /* Since this is more of a negative feature we invert it to get the real
2156 * feature (when the guest does not need the host cursor). */
2157 fFeatures ^= VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2158 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2159 pSession->fMouseStatus = fFeatures;
2160 RTSpinlockRelease(pDevExt->SessionSpinlock);
2161 rc = vboxGuestUpdateHostFlags(pDevExt, pSession, HostFlags_MouseStatus);
2162 return rc;
2163}
2164
2165#ifdef VBOX_WITH_HGCM
2166
2167AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
2168
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks until the host has marked the HGCM request as done
 * (VBOX_HGCM_REQ_DONE in pHdr->fu32Flags), the wait times out, or - when
 * @a fInterruptible is set - the wait is interrupted.
 *
 * @returns VBox status code (VINF_SUCCESS when the request completed).
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Timeout in milliseconds (RT_INDEFINITE_WAIT for
 *                          no timeout).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockRelease(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockRelease(pDevExt->EventSpinlock);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of wait records; give the system a moment and retry. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        /* Completed in the window between the checks; no need to sleep. */
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockRelease(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* NOTE(review): on VERR_SEM_DESTROYED pWait is not freed here -
       presumably the wait lists are torn down with the device; confirm. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if ( RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && ( !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRelFlow(("wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
2237
2238
2239/**
2240 * This is a callback for dealing with async waits.
2241 *
2242 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
2243 */
2244static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
2245{
2246 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2247 LogFlowFunc(("requestType=%d\n", pHdr->header.requestType));
2248 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
2249 pDevExt,
2250 false /* fInterruptible */,
2251 u32User /* cMillies */);
2252}
2253
2254
2255/**
2256 * This is a callback for dealing with async waits with a timeout.
2257 *
2258 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
2259 */
2260static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
2261 void *pvUser, uint32_t u32User)
2262{
2263 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
2264 LogFlowFunc(("requestType=%d\n", pHdr->header.requestType));
2265 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
2266 pDevExt,
2267 true /* fInterruptible */,
2268 u32User /* cMillies */ );
2269
2270}
2271
2272
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Performs the HGCM connect and, on success, records the new client id in
 * the session's client id table so it can be cleaned up when the session
 * closes. If the table is full the connection is immediately torn down.
 *
 * @returns VBox status code (VERR_TOO_MANY_OPEN_FILES when the session's
 *          client id table is full).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connect request (in/out).
 * @param   pcbDataReturned Where to store the amount of returned data.
 *                          Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    LogFlowFunc(("%.128s\n",
                 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
                 ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        LogFlowFunc(("u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
                     pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSpinlockAcquire(pDevExt->SessionSpinlock);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])   /* 0 marks a free slot. */
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockRelease(pDevExt->SessionSpinlock);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect so the host side does not leak. */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)   /* rate-limit the log. */
                    LogRelFunc(("Too many HGCMConnect calls for one session\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
2326
2327
2328static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
2329 size_t *pcbDataReturned)
2330{
2331 /*
2332 * Validate the client id and invalidate its entry while we're in the call.
2333 */
2334 int rc;
2335 const uint32_t u32ClientId = pInfo->u32ClientID;
2336 unsigned i;
2337 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2338 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2339 if (pSession->aHGCMClientIds[i] == u32ClientId)
2340 {
2341 pSession->aHGCMClientIds[i] = UINT32_MAX;
2342 break;
2343 }
2344 RTSpinlockRelease(pDevExt->SessionSpinlock);
2345 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
2346 {
2347 static unsigned s_cErrors = 0;
2348 if (s_cErrors++ > 32)
2349 LogRelFunc(("u32Client=%RX32\n", u32ClientId));
2350 return VERR_INVALID_HANDLE;
2351 }
2352
2353 /*
2354 * The VbglHGCMConnect call will invoke the callback if the HGCM
2355 * call is performed in an ASYNC fashion. The function is not able
2356 * to deal with cancelled requests.
2357 */
2358 LogFlowFunc(("u32Client=%RX32\n", pInfo->u32ClientID));
2359 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
2360 if (RT_SUCCESS(rc))
2361 {
2362 LogFlowFunc(("Disconnected with rc=%Rrc\n", pInfo->result)); /** int32_t vs. int! */
2363 if (pcbDataReturned)
2364 *pcbDataReturned = sizeof(*pInfo);
2365 }
2366
2367 /* Update the client id array according to the result. */
2368 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2369 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
2370 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
2371 RTSpinlockRelease(pDevExt->SessionSpinlock);
2372
2373 return rc;
2374}
2375
2376
2377static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
2378 PVBOXGUESTSESSION pSession,
2379 VBoxGuestHGCMCallInfo *pInfo,
2380 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2381 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2382{
2383 const uint32_t u32ClientId = pInfo->u32ClientID;
2384 uint32_t fFlags;
2385 size_t cbActual;
2386 unsigned i;
2387 int rc;
2388
2389 /*
2390 * Some more validations.
2391 */
2392 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2393 {
2394 LogRelFunc(("cParm=%RX32 is not sane\n", pInfo->cParms));
2395 return VERR_INVALID_PARAMETER;
2396 }
2397
2398 cbActual = cbExtra + sizeof(*pInfo);
2399#ifdef RT_ARCH_AMD64
2400 if (f32bit)
2401 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2402 else
2403#endif
2404 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2405 if (cbData < cbActual)
2406 {
2407 LogRelFunc(("cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2408 cbData, cbData, cbActual, cbActual));
2409 return VERR_INVALID_PARAMETER;
2410 }
2411
2412 /*
2413 * Validate the client id.
2414 */
2415 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2416 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2417 if (pSession->aHGCMClientIds[i] == u32ClientId)
2418 break;
2419 RTSpinlockRelease(pDevExt->SessionSpinlock);
2420 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2421 {
2422 static unsigned s_cErrors = 0;
2423 if (s_cErrors++ > 32)
2424 LogRelFunc(("Invalid handle; u32Client=%RX32\n", u32ClientId));
2425 return VERR_INVALID_HANDLE;
2426 }
2427
2428 /*
2429 * The VbglHGCMCall call will invoke the callback if the HGCM
2430 * call is performed in an ASYNC fashion. This function can
2431 * deal with cancelled requests, so we let user more requests
2432 * be interruptible (should add a flag for this later I guess).
2433 */
2434 LogFlowFunc(("u32Client=%RX32\n", pInfo->u32ClientID));
2435 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2436 uint32_t cbInfo = (uint32_t)(cbData - cbExtra);
2437#ifdef RT_ARCH_AMD64
2438 if (f32bit)
2439 {
2440 if (fInterruptible)
2441 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2442 else
2443 rc = VbglR0HGCMInternalCall32(pInfo, cbInfo, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2444 }
2445 else
2446#endif
2447 {
2448 if (fInterruptible)
2449 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2450 else
2451 rc = VbglR0HGCMInternalCall(pInfo, cbInfo, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2452 }
2453 if (RT_SUCCESS(rc))
2454 {
2455 LogFlowFunc(("Result rc=%Rrc\n", pInfo->result)); /** int32_t vs. int! */
2456 if (pcbDataReturned)
2457 *pcbDataReturned = cbActual;
2458 }
2459 else
2460 {
2461 if ( rc != VERR_INTERRUPTED
2462 && rc != VERR_TIMEOUT)
2463 {
2464 static unsigned s_cErrors = 0;
2465 if (s_cErrors++ < 32)
2466 LogRelFunc(("%s-bit call failed; rc=%Rrc\n",
2467 f32bit ? "32" : "64", rc));
2468 }
2469 else
2470 LogFlowFunc(("%s-bit call failed; rc=%Rrc\n",
2471 f32bit ? "32" : "64", rc));
2472 }
2473 return rc;
2474}
2475#endif /* VBOX_WITH_HGCM */
2476
2477
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly. If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pInfo           The output buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can
 *                          be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    LogFlowFuncEnter();

    int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if ( pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
    {
        pDevExt->MemBalloon.pOwner = pSession;
    }

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        /* Ask the host for the current balloon target. */
        VMMDevGetMemBalloonChangeRequest *pReq;
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest),
                         VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The chunk count reported by the host must stay constant. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3 = false;

                /* Try to adjust the balloon in ring-0; fHandleInR3 is set when
                   the OS-specific code wants user land to supply the memory. */
                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if ( rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRelFunc(("VbglGRPerform failed; rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;    /* another session owns the balloon. */

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);

    LogFlowFunc(("Returns %Rrc\n", rc));
    return rc;
}
2556
2557
2558/**
2559 * Handle a request for changing the memory balloon.
2560 *
2561 * @returns VBox status code.
2562 *
2563 * @param pDevExt The device extention.
2564 * @param pSession The session.
2565 * @param pInfo The change request structure (input).
2566 * @param pcbDataReturned Where to store the amount of returned data. Can
2567 * be NULL.
2568 */
2569static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2570 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2571{
2572 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2573 AssertRCReturn(rc, rc);
2574
2575 if (!pDevExt->MemBalloon.fUseKernelAPI)
2576 {
2577 /*
2578 * The first user trying to query/change the balloon becomes the
2579 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2580 */
2581 if ( pDevExt->MemBalloon.pOwner != pSession
2582 && pDevExt->MemBalloon.pOwner == NULL)
2583 pDevExt->MemBalloon.pOwner = pSession;
2584
2585 if (pDevExt->MemBalloon.pOwner == pSession)
2586 {
2587 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr,
2588 !!pInfo->fInflate);
2589 if (pcbDataReturned)
2590 *pcbDataReturned = 0;
2591 }
2592 else
2593 rc = VERR_PERMISSION_DENIED;
2594 }
2595 else
2596 rc = VERR_PERMISSION_DENIED;
2597
2598 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2599 return rc;
2600}
2601
2602
2603/**
2604 * Handle a request for writing a core dump of the guest on the host.
2605 *
2606 * @returns VBox status code.
2607 *
2608 * @param pDevExt The device extension.
2609 * @param pInfo The output buffer.
2610 */
2611static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2612{
2613 VMMDevReqWriteCoreDump *pReq = NULL;
2614 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqWriteCoreDump),
2615 VMMDevReq_WriteCoreDump);
2616 if (RT_FAILURE(rc))
2617 {
2618 LogFlowFunc(("Failed to allocate %u (%#x) bytes to cache the request; rc=%Rrc\n",
2619 sizeof(VMMDevReqWriteCoreDump), sizeof(VMMDevReqWriteCoreDump), rc));
2620 return rc;
2621 }
2622
2623 pReq->fFlags = pInfo->fFlags;
2624 rc = VbglGRPerform(&pReq->header);
2625 if (RT_FAILURE(rc))
2626 LogFlowFunc(("VbglGRPerform failed, rc=%Rrc\n", rc));
2627
2628 VbglGRFree(&pReq->header);
2629 return rc;
2630}
2631
2632
2633/**
2634 * Guest backdoor logging.
2635 *
2636 * @returns VBox status code.
2637 *
2638 * @param pDevExt The device extension.
2639 * @param pch The log message (need not be NULL terminated).
2640 * @param cbData Size of the buffer.
2641 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2642 */
2643static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned, bool fUserSession)
2644{
2645 NOREF(pch);
2646 NOREF(cbData);
2647 if (pDevExt->fLoggingEnabled)
2648 RTLogBackdoorPrintf("%.*s", cbData, pch);
2649 else if (!fUserSession)
2650 LogRel(("%.*s", cbData, pch));
2651 else
2652 Log(("%.*s", cbData, pch));
2653 if (pcbDataReturned)
2654 *pcbDataReturned = 0;
2655 return VINF_SUCCESS;
2656}
2657
2658static bool VBoxGuestCommonGuestCapsValidateValues(uint32_t fCaps)
2659{
2660 if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
2661 return false;
2662
2663 return true;
2664}
2665
/** Check whether any unreported VMM device events should be reported to any of
 * the currently listening sessions. In addition, report any events in
 * @a fGenFakeEvents.
 * @note This is called by GUEST_CAPS_ACQUIRE in case any pending events can now
 *       be dispatched to the session which acquired capabilities. The fake
 *       events are a hack to wake up threads in that session which would not
 *       otherwise be woken.
 * @todo Why not just use CANCEL_ALL_WAITEVENTS to do the waking up rather than
 *       adding additional code to the driver?
 * @todo Why does acquiring capabilities block and unblock events? Capabilities
 *       are supposed to control what is reported to the host, we already have
 *       separate requests for blocking and unblocking events. */
static void VBoxGuestCommonCheckEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fGenFakeEvents)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    /* Combine the fake events with everything currently pending. */
    uint32_t fEvents = fGenFakeEvents | pDevExt->f32PendingEvents;
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;

    /* Hand each matching event to the first waiter that can handle it. */
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
        if ( (pWait->fReqEvents & fEvents & fHandledEvents)
            && !pWait->fResEvents)
        {
            pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
            /* Fake events must only ever reach the acquiring session. */
            Assert(!(fGenFakeEvents & pWait->fResEvents) || pSession == pWait->pSession);
            fEvents &= ~pWait->fResEvents;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Defer the actual wake-up until after the spinlock is dropped. */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            int rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);
#endif
            if (!fEvents)
                break;
        }
    }
    /* Whatever was not consumed stays pending. */
    ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);

    RTSpinlockRelease(pDevExt->EventSpinlock);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif
}
2714
/** Switch the capabilities in @a fOrMask to "acquire" mode if they are not
 * already in "set" mode. If @a enmFlags is not set to
 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE, also try to acquire those
 * capabilities for the current session and release those in @a fNotMask.
 *
 * @returns VBox status code: VERR_INVALID_PARAMETER for bad masks/flags,
 *          VERR_INVALID_STATE when a capability is already in "set" mode,
 *          VERR_RESOURCE_BUSY when another session owns a requested
 *          capability. */
static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags)
{
    uint32_t fSetCaps = 0;

    /* Only known capability bits may be acquired. */
    if (!VBoxGuestCommonGuestCapsValidateValues(fOrMask))
    {
        LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid fOrMask\n",
                    pSession, fOrMask, fNotMask, enmFlags));
        return VERR_INVALID_PARAMETER;
    }

    if ( enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
        && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
    {
        LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- invalid enmFlags %d\n",
                    pSession, fOrMask, fNotMask, enmFlags));
        return VERR_INVALID_PARAMETER;
    }

    /* Switch the requested capabilities to acquire mode; fails if any of
       them is already being used in "set" mode. */
    if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, fOrMask, true, &fSetCaps))
    {
        LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- calling caps acquire for set caps\n",
                    pSession, fOrMask, fNotMask, enmFlags));
        return VERR_INVALID_STATE;
    }

    /* Config-only mode: just switch the mode, do not acquire anything. */
    if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
    {
        LogRelFunc(("pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x) -- configured acquire caps: 0x%x\n",
                    pSession, fOrMask, fNotMask, enmFlags));
        return VINF_SUCCESS;
    }

    /* the fNotMask no need to have all values valid,
     * invalid ones will simply be ignored */
    uint32_t fCurrentOwnedCaps;
    uint32_t fSessionNotCaps;
    uint32_t fSessionOrCaps;
    uint32_t fOtherConflictingCaps;

    fNotMask &= ~fOrMask;

    RTSpinlockAcquire(pDevExt->EventSpinlock);

    /* Work out what this session gains/releases, and whether any of the
       wanted capabilities is currently owned by another session. */
    fCurrentOwnedCaps = pSession->u32AquiredGuestCaps;
    fSessionNotCaps = fCurrentOwnedCaps & fNotMask;
    fSessionOrCaps = fOrMask & ~fCurrentOwnedCaps;
    fOtherConflictingCaps = pDevExt->u32GuestCaps & ~fCurrentOwnedCaps;
    fOtherConflictingCaps &= fSessionOrCaps;

    if (!fOtherConflictingCaps)
    {
        if (fSessionOrCaps)
        {
            pSession->u32AquiredGuestCaps |= fSessionOrCaps;
            pDevExt->u32GuestCaps |= fSessionOrCaps;
        }

        if (fSessionNotCaps)
        {
            pSession->u32AquiredGuestCaps &= ~fSessionNotCaps;
            pDevExt->u32GuestCaps &= ~fSessionNotCaps;
        }
    }

    RTSpinlockRelease(pDevExt->EventSpinlock);

    if (fOtherConflictingCaps)
    {
        LogFlowFunc(("Caps 0x%x were busy\n", fOtherConflictingCaps));
        return VERR_RESOURCE_BUSY;
    }

    /* now do host notification outside the lock */
    if (!fSessionOrCaps && !fSessionNotCaps)
    {
        /* no changes, return */
        return VINF_SUCCESS;
    }

    int rc = VBoxGuestSetGuestCapabilities(fSessionOrCaps, fSessionNotCaps);
    if (RT_FAILURE(rc))
    {
        LogRelFunc(("VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));

        /* Failure branch
         * this is generally bad since e.g. failure to release the caps may result in other sessions not being able to use it
         * so we are not trying to restore the caps back to their values before the VBoxGuestCommonGuestCapsAcquire call,
         * but just pretend everithing is OK.
         * @todo: better failure handling mechanism? */
    }

    /* success! */
    uint32_t fGenFakeEvents = 0;

    if (fSessionOrCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
    {
        /* generate the seamless change event so that the r3 app could synch with the seamless state
         * although this introduces a false alarming of r3 client, it still solve the problem of
         * client state inconsistency in multiuser environment */
        fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
    }

    /* since the acquire filter mask has changed, we need to process events in any way to ensure they go from pending events field
     * to the proper (un-filtered) entries */
    VBoxGuestCommonCheckEvents(pDevExt, pSession, fGenFakeEvents);

    return VINF_SUCCESS;
}
2828
2829static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
2830{
2831 int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags);
2832 if (RT_FAILURE(rc))
2833 LogRelFunc(("Failed, rc=%Rrc\n", rc));
2834 pAcquire->rc = rc;
2835 return VINF_SUCCESS;
2836}
2837
2838
2839/**
2840 * Common IOCtl for user to kernel and kernel to kernel communication.
2841 *
2842 * This function only does the basic validation and then invokes
2843 * worker functions that takes care of each specific function.
2844 *
2845 * @returns VBox status code.
2846 *
2847 * @param iFunction The requested function.
2848 * @param pDevExt The device extension.
2849 * @param pSession The client session.
2850 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2851 * @param cbData The max size of the data buffer.
2852 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2853 */
2854int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2855 void *pvData, size_t cbData, size_t *pcbDataReturned)
2856{
2857 int rc;
2858 LogFlowFunc(("iFunction=%#x, pDevExt=%p, pSession=%p, pvData=%p, cbData=%zu\n",
2859 iFunction, pDevExt, pSession, pvData, cbData));
2860
2861 /*
2862 * Make sure the returned data size is set to zero.
2863 */
2864 if (pcbDataReturned)
2865 *pcbDataReturned = 0;
2866
2867 /*
2868 * Define some helper macros to simplify validation.
2869 */
2870#define CHECKRET_RING0(mnemonic) \
2871 do { \
2872 if (pSession->R0Process != NIL_RTR0PROCESS) \
2873 { \
2874 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2875 pSession->Process, (uintptr_t)pSession->R0Process)); \
2876 return VERR_PERMISSION_DENIED; \
2877 } \
2878 } while (0)
2879#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2880 do { \
2881 if (cbData < (cbMin)) \
2882 { \
2883 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2884 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2885 return VERR_BUFFER_OVERFLOW; \
2886 } \
2887 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2888 { \
2889 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2890 return VERR_INVALID_POINTER; \
2891 } \
2892 } while (0)
2893#define CHECKRET_SIZE(mnemonic, cb) \
2894 do { \
2895 if (cbData != (cb)) \
2896 { \
2897 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2898 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2899 return VERR_BUFFER_OVERFLOW; \
2900 } \
2901 if ((cb) != 0 && !VALID_PTR(pvData)) \
2902 { \
2903 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2904 return VERR_INVALID_POINTER; \
2905 } \
2906 } while (0)
2907
2908
2909 /*
2910 * Deal with variably sized requests first.
2911 */
2912 rc = VINF_SUCCESS;
2913 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2914 {
2915 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2916 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2917 }
2918#ifdef VBOX_WITH_HGCM
2919 /*
2920 * These ones are a bit tricky.
2921 */
2922 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2923 {
2924 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2925 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2926 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2927 fInterruptible, false /*f32bit*/, false /* fUserData */,
2928 0, cbData, pcbDataReturned);
2929 }
2930 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2931 {
2932 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2933 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2934 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2935 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2936 false /*f32bit*/, false /* fUserData */,
2937 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2938 }
2939 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2940 {
2941 bool fInterruptible = true;
2942 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2943 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2944 fInterruptible, false /*f32bit*/, true /* fUserData */,
2945 0, cbData, pcbDataReturned);
2946 }
2947# ifdef RT_ARCH_AMD64
2948 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2949 {
2950 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2951 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2952 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2953 fInterruptible, true /*f32bit*/, false /* fUserData */,
2954 0, cbData, pcbDataReturned);
2955 }
2956 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2957 {
2958 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2959 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2960 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2961 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2962 true /*f32bit*/, false /* fUserData */,
2963 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2964 }
2965# endif
2966#endif /* VBOX_WITH_HGCM */
2967 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2968 {
2969 CHECKRET_MIN_SIZE("LOG", 1);
2970 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned, pSession->fUserSession);
2971 }
2972 else
2973 {
2974 switch (iFunction)
2975 {
2976 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2977 CHECKRET_RING0("GETVMMDEVPORT");
2978 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2979 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2980 break;
2981
2982#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2983 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2984 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2985 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2986 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2987 break;
2988#endif
2989
2990 case VBOXGUEST_IOCTL_WAITEVENT:
2991 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2992 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2993 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2994 break;
2995
2996 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2997 if (cbData != 0)
2998 rc = VERR_INVALID_PARAMETER;
2999 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
3000 break;
3001
3002 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
3003 CHECKRET_MIN_SIZE("CTL_FILTER_MASK",
3004 sizeof(VBoxGuestFilterMaskInfo));
3005 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, pSession,
3006 (VBoxGuestFilterMaskInfo *)pvData);
3007 break;
3008
3009#ifdef VBOX_WITH_HGCM
3010 case VBOXGUEST_IOCTL_HGCM_CONNECT:
3011# ifdef RT_ARCH_AMD64
3012 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
3013# endif
3014 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
3015 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
3016 break;
3017
3018 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
3019# ifdef RT_ARCH_AMD64
3020 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
3021# endif
3022 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
3023 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
3024 break;
3025#endif /* VBOX_WITH_HGCM */
3026
3027 case VBOXGUEST_IOCTL_CHECK_BALLOON:
3028 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
3029 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
3030 break;
3031
3032 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
3033 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
3034 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
3035 break;
3036
3037 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
3038 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
3039 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
3040 break;
3041
3042 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
3043 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
3044 rc = vboxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
3045 *(uint32_t *)pvData);
3046 break;
3047
3048#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
3049 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
3050 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
3051 rc = VbgdNtIOCtl_DpcLatencyChecker();
3052 break;
3053#endif
3054
3055 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
3056 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
3057 rc = VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire*)pvData);
3058 *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
3059 break;
3060
3061 case VBOXGUEST_IOCTL_SET_GUEST_CAPABILITIES:
3062 CHECKRET_MIN_SIZE("SET_GUEST_CAPABILITIES",
3063 sizeof(VBoxGuestSetCapabilitiesInfo));
3064 rc = VBoxGuestCommonIOCtl_SetCapabilities(pDevExt, pSession,
3065 (VBoxGuestSetCapabilitiesInfo *)pvData);
3066 break;
3067
3068 default:
3069 {
3070 LogRelFunc(("Unknown request iFunction=%#x, stripped size=%#x\n",
3071 iFunction, VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
3072 rc = VERR_NOT_SUPPORTED;
3073 break;
3074 }
3075 }
3076 }
3077
3078 LogFlowFunc(("Returning %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
3079 return rc;
3080}
3081
3082
3083
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up threads waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents; /* Pre-allocated IRQ acknowledge request. */
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     * (pIrqAckEvents is only set once initialization has completed.)
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     * Everything up to the spinlock release below runs with the event
     * spinlock held, so waiter lists and f32PendingEvents are consistent.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events by handing the physical address of the
         * pre-allocated request to the VMMDev request port; the host fills
         * in pReq->events and pReq->header.rc.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR; /* Poison rc so a missed host update is detectable. */
        pReq->events = 0;
        ASMCompilerBarrier(); /* Make sure the request fields are written before the port write. */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events; /* Event mask returned by the host. */
            PVBOXGUESTWAIT pWait;
            PVBOXGUESTWAIT pSafe;

#ifndef DEBUG_andy
            LogFlowFunc(("Acknowledge events succeeded: %#RX32\n", fEvents));
#endif
            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it for the post-spinlock handling and strip it from
             * the mask handed to the waiter lists.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
#ifndef RT_OS_WINDOWS
                /* Non-Windows OSes may have registered a direct notification callback. */
                if (pDevExt->MouseNotifyCallback.pfnNotify)
                    pDevExt->MouseNotifyCallback.pfnNotify
                        (pDevExt->MouseNotifyCallback.pvUser);
#endif
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host marked done gets woken up.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        /* Wake-up is deferred to VBoxGuestWaitDoWakeUps outside the spinlock. */
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             * Merge in events left over from previous interrupts, hand each
             * waiter the subset it asked for (and its session may handle),
             * and consume those bits from the mask.
             */
            fEvents |= pDevExt->f32PendingEvents;
            RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
            {
                uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
                if (    (pWait->fReqEvents & fEvents & fHandledEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                    fEvents &= ~pWait->fResEvents;
                    RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                    RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                    rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                    /* Stop early once every event bit has been consumed. */
                    if (!fEvents)
                        break;
                }
            }
            /* Whatever nobody consumed stays pending for future waiters. */
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            LogFlowFunc(("Acknowledging events failed, rc=%Rrc (events=%#x)\n",
                         pReq->header.rc, pReq->events));
    }
#ifndef DEBUG_andy
    else
        LogFlowFunc(("Not ours\n"));
#endif

    RTSpinlockRelease(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_DARWIN) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     * care of it. Same on darwin, doing it in the work loop callback.
     */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implement that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    /* rc only accumulates RTSemEventMultiSignal status in the non-deferred case; it should be 0. */
    Assert(rc == 0);
    NOREF(rc);
    return fOurIrq;
}
3227
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette