VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 47294

Last change on this file since 47294 was 47294, checked in by vboxsync, 12 years ago

Guest user state reporting: Update.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 109.4 KB
Line 
1/* $Id: VBoxGuest.cpp 47294 2013-07-22 11:19:20Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68#ifdef DEBUG
69static void testSetMouseStatus(void);
70#endif
71static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
72
73static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags);
74
75#define VBOXGUEST_ACQUIRE_STYLE_EVENTS (VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST | VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST)
76
77DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
78{
79 if(!pDevExt->u32AcquireModeGuestCaps)
80 return VMMDEV_EVENT_VALID_EVENT_MASK;
81
82 uint32_t u32AllowedGuestCaps = pSession->u32AquiredGuestCaps | (VMMDEV_EVENT_VALID_EVENT_MASK & ~pDevExt->u32AcquireModeGuestCaps);
83 uint32_t u32CleanupEvents = VBOXGUEST_ACQUIRE_STYLE_EVENTS;
84 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
85 u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
86 if (u32AllowedGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
87 u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
88
89 return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
90}
91
92DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
93{
94 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
95 if (fMatches)
96 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
97 return fMatches;
98}
99
100DECLINLINE(bool) VBoxGuestCommonGuestCapsModeSet(PVBOXGUESTDEVEXT pDevExt, uint32_t fCaps, bool fAcquire, uint32_t *pu32OtherVal)
101{
102 uint32_t *pVal = fAcquire ? &pDevExt->u32AcquireModeGuestCaps : &pDevExt->u32SetModeGuestCaps;
103 const uint32_t fNotVal = !fAcquire ? pDevExt->u32AcquireModeGuestCaps : pDevExt->u32SetModeGuestCaps;
104 bool fResult = true;
105 RTSpinlockAcquire(pDevExt->EventSpinlock);
106
107 if (!(fNotVal & fCaps))
108 *pVal |= fCaps;
109 else
110 {
111 AssertMsgFailed(("trying to change caps mode\n"));
112 fResult = false;
113 }
114
115 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
116
117 if (pu32OtherVal)
118 *pu32OtherVal = fNotVal;
119 return fResult;
120}
121
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request up to and including the physical
 *  page array for one full balloon chunk; used by all the ballooning code. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
126
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris (also built for Darwin, per the
 * surrounding #if).
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    /* RTSemMutex* */
    (PFNRT)RTSemMutexCreate,
    (PFNRT)RTSemMutexDestroy,
    (PFNRT)RTSemMutexRequest,
    (PFNRT)RTSemMutexRequestNoResume,
    (PFNRT)RTSemMutexRequestDebug,
    (PFNRT)RTSemMutexRequestNoResumeDebug,
    (PFNRT)RTSemMutexRelease,
    (PFNRT)RTSemMutexIsOwned,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
148
149
150/**
151 * Reserves memory in which the VMM can relocate any guest mappings
152 * that are floating around.
153 *
154 * This operation is a little bit tricky since the VMM might not accept
155 * just any address because of address clashes between the three contexts
156 * it operates in, so use a small stack to perform this operation.
157 *
158 * @returns VBox status code (ignored).
159 * @param pDevExt The device extension.
160 */
161static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
162{
163 /*
164 * Query the required space.
165 */
166 VMMDevReqHypervisorInfo *pReq;
167 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
168 if (RT_FAILURE(rc))
169 return rc;
170 pReq->hypervisorStart = 0;
171 pReq->hypervisorSize = 0;
172 rc = VbglGRPerform(&pReq->header);
173 if (RT_FAILURE(rc)) /* this shouldn't happen! */
174 {
175 VbglGRFree(&pReq->header);
176 return rc;
177 }
178
179 /*
180 * The VMM will report back if there is nothing it wants to map, like for
181 * instance in VT-x and AMD-V mode.
182 */
183 if (pReq->hypervisorSize == 0)
184 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
185 else
186 {
187 /*
188 * We have to try several times since the host can be picky
189 * about certain addresses.
190 */
191 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
192 uint32_t cbHypervisor = pReq->hypervisorSize;
193 RTR0MEMOBJ ahTries[5];
194 uint32_t iTry;
195 bool fBitched = false;
196 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
197 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
198 {
199 /*
200 * Reserve space, or if that isn't supported, create a object for
201 * some fictive physical memory and map that in to kernel space.
202 *
203 * To make the code a bit uglier, most systems cannot help with
204 * 4MB alignment, so we have to deal with that in addition to
205 * having two ways of getting the memory.
206 */
207 uint32_t uAlignment = _4M;
208 RTR0MEMOBJ hObj;
209 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
210 if (rc == VERR_NOT_SUPPORTED)
211 {
212 uAlignment = PAGE_SIZE;
213 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
214 }
215 /*
216 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
217 * not implemented at all at the current platform, try to map the memory object into the
218 * virtual kernel space.
219 */
220 if (rc == VERR_NOT_SUPPORTED)
221 {
222 if (hFictive == NIL_RTR0MEMOBJ)
223 {
224 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
225 if (RT_FAILURE(rc))
226 break;
227 hFictive = hObj;
228 }
229 uAlignment = _4M;
230 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
231 if (rc == VERR_NOT_SUPPORTED)
232 {
233 uAlignment = PAGE_SIZE;
234 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
235 }
236 }
237 if (RT_FAILURE(rc))
238 {
239 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
240 rc, cbHypervisor, uAlignment, iTry));
241 fBitched = true;
242 break;
243 }
244
245 /*
246 * Try set it.
247 */
248 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
249 pReq->header.rc = VERR_INTERNAL_ERROR;
250 pReq->hypervisorSize = cbHypervisor;
251 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
252 if ( uAlignment == PAGE_SIZE
253 && pReq->hypervisorStart & (_4M - 1))
254 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
255 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
256
257 rc = VbglGRPerform(&pReq->header);
258 if (RT_SUCCESS(rc))
259 {
260 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
261 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
262 RTR0MemObjAddress(pDevExt->hGuestMappings),
263 RTR0MemObjSize(pDevExt->hGuestMappings),
264 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
265 break;
266 }
267 ahTries[iTry] = hObj;
268 }
269
270 /*
271 * Cleanup failed attempts.
272 */
273 while (iTry-- > 0)
274 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
275 if ( RT_FAILURE(rc)
276 && hFictive != NIL_RTR0PTR)
277 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
278 if (RT_FAILURE(rc) && !fBitched)
279 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
280 }
281 VbglGRFree(&pReq->header);
282
283 /*
284 * We ignore failed attempts for now.
285 */
286 return VINF_SUCCESS;
287}
288
289
290/**
291 * Undo what vboxGuestInitFixateGuestMappings did.
292 *
293 * @param pDevExt The device extension.
294 */
295static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
296{
297 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
298 {
299 /*
300 * Tell the host that we're going to free the memory we reserved for
301 * it, the free it up. (Leak the memory if anything goes wrong here.)
302 */
303 VMMDevReqHypervisorInfo *pReq;
304 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
305 if (RT_SUCCESS(rc))
306 {
307 pReq->hypervisorStart = 0;
308 pReq->hypervisorSize = 0;
309 rc = VbglGRPerform(&pReq->header);
310 VbglGRFree(&pReq->header);
311 }
312 if (RT_SUCCESS(rc))
313 {
314 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
315 AssertRC(rc);
316 }
317 else
318 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
319
320 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
321 }
322}
323
324
325/**
326 * Sets the interrupt filter mask during initialization and termination.
327 *
328 * This will ASSUME that we're the ones in carge over the mask, so
329 * we'll simply clear all bits we don't set.
330 *
331 * @returns VBox status code (ignored).
332 * @param pDevExt The device extension.
333 * @param fMask The new mask.
334 */
335static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
336{
337 VMMDevCtlGuestFilterMask *pReq;
338 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
339 if (RT_SUCCESS(rc))
340 {
341 pReq->u32OrMask = fMask;
342 pReq->u32NotMask = ~fMask;
343 rc = VbglGRPerform(&pReq->header);
344 if (RT_FAILURE(rc))
345 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
346 VbglGRFree(&pReq->header);
347 }
348 return rc;
349}
350
351
352/**
353 * Inflate the balloon by one chunk represented by an R0 memory object.
354 *
355 * The caller owns the balloon mutex.
356 *
357 * @returns IPRT status code.
358 * @param pMemObj Pointer to the R0 memory object.
359 * @param pReq The pre-allocated request for performing the VMMDev call.
360 */
361static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
362{
363 uint32_t iPage;
364 int rc;
365
366 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
367 {
368 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
369 pReq->aPhysPage[iPage] = phys;
370 }
371
372 pReq->fInflate = true;
373 pReq->header.size = cbChangeMemBalloonReq;
374 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
375
376 rc = VbglGRPerform(&pReq->header);
377 if (RT_FAILURE(rc))
378 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
379 return rc;
380}
381
382
383/**
384 * Deflate the balloon by one chunk - info the host and free the memory object.
385 *
386 * The caller owns the balloon mutex.
387 *
388 * @returns IPRT status code.
389 * @param pMemObj Pointer to the R0 memory object.
390 * The memory object will be freed afterwards.
391 * @param pReq The pre-allocated request for performing the VMMDev call.
392 */
393static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
394{
395 uint32_t iPage;
396 int rc;
397
398 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
399 {
400 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
401 pReq->aPhysPage[iPage] = phys;
402 }
403
404 pReq->fInflate = false;
405 pReq->header.size = cbChangeMemBalloonReq;
406 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
407
408 rc = VbglGRPerform(&pReq->header);
409 if (RT_FAILURE(rc))
410 {
411 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
412 return rc;
413 }
414
415 rc = RTR0MemObjFree(*pMemObj, true);
416 if (RT_FAILURE(rc))
417 {
418 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
419 return rc;
420 }
421
422 *pMemObj = NIL_RTR0MEMOBJ;
423 return VINF_SUCCESS;
424}
425
426
427/**
428 * Inflate/deflate the memory balloon and notify the host.
429 *
430 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
431 * the mutex.
432 *
433 * @returns VBox status code.
434 * @param pDevExt The device extension.
435 * @param pSession The session.
436 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
437 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
438 * (VINF_SUCCESS if set).
439 */
440static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
441{
442 int rc = VINF_SUCCESS;
443
444 if (pDevExt->MemBalloon.fUseKernelAPI)
445 {
446 VMMDevChangeMemBalloon *pReq;
447 uint32_t i;
448
449 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
450 {
451 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
452 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
453 return VERR_INVALID_PARAMETER;
454 }
455
456 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
457 return VINF_SUCCESS; /* nothing to do */
458
459 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
460 && !pDevExt->MemBalloon.paMemObj)
461 {
462 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
463 if (!pDevExt->MemBalloon.paMemObj)
464 {
465 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
466 return VERR_NO_MEMORY;
467 }
468 }
469
470 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
471 if (RT_FAILURE(rc))
472 return rc;
473
474 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
475 {
476 /* inflate */
477 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
478 {
479 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
480 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
481 if (RT_FAILURE(rc))
482 {
483 if (rc == VERR_NOT_SUPPORTED)
484 {
485 /* not supported -- fall back to the R3-allocated memory. */
486 rc = VINF_SUCCESS;
487 pDevExt->MemBalloon.fUseKernelAPI = false;
488 Assert(pDevExt->MemBalloon.cChunks == 0);
489 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
490 }
491 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
492 * cannot allocate more memory => don't try further, just stop here */
493 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
494 break;
495 }
496
497 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
498 if (RT_FAILURE(rc))
499 {
500 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
501 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
502 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
503 break;
504 }
505 pDevExt->MemBalloon.cChunks++;
506 }
507 }
508 else
509 {
510 /* deflate */
511 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
512 {
513 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
514 if (RT_FAILURE(rc))
515 {
516 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
517 break;
518 }
519 pDevExt->MemBalloon.cChunks--;
520 }
521 }
522
523 VbglGRFree(&pReq->header);
524 }
525
526 /*
527 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
528 * the balloon changes via the other API.
529 */
530 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
531
532 return rc;
533}
534
535
536/**
537 * Helper to reinit the VBoxVMM communication after hibernation.
538 *
539 * @returns VBox status code.
540 * @param pDevExt The device extension.
541 * @param enmOSType The OS type.
542 */
543int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
544{
545 int rc = VBoxGuestReportGuestInfo(enmOSType);
546 if (RT_SUCCESS(rc))
547 {
548 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
549 if (RT_FAILURE(rc))
550 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
551 }
552 else
553 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
554 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
555 return rc;
556}
557
558
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (not used by this worker itself).
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon (ring-3 user virtual address).
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse inflation when the balloon is full or the maximum size was
           never queried (cMaxChunks == 0). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        if (!pDevExt->MemBalloon.paMemObj)
        {
            /* Lazily allocate the per-chunk memory object tracking array. */
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflation this also picks the first free slot; for deflation it
     * locates the slot holding the chunk to release.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the user pages before handing them to the host. */
        rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Inflation failed: unpin the pages again and clear the slot. */
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
675
676
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a NULL session at driver unload) may tear
       the balloon down. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                /* Deflate chunk by chunk; on the first failure the remaining
                   chunks are deliberately leaked (see LogRel below). */
                uint32_t i;
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc.  Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc).  Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
724
725
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.  Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;
    unsigned i;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
    pDevExt->fVRDPEnabled = false;
#endif
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
        pDevExt->acMouseFeatureUsage[i] = 0;
    pDevExt->fMouseStatus = 0;
    pDevExt->MouseNotifyCallback.pfnNotify = NULL;
    pDevExt->MouseNotifyCallback.pvUser = NULL;
    pDevExt->cISR = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    pDevExt->u32AcquireModeGuestCaps = 0;
    pDevExt->u32SetModeGuestCaps = 0;
    pDevExt->u32GuestCaps = 0;

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* The event spinlock may have been created even when the session one failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     *
     * Each failure below unwinds exactly the steps that succeeded before it
     * (see the tail of the function for the common unwind of the locks).
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

#ifdef DEBUG
                        testSetMouseStatus();  /* Other tests? */
#endif

                        /* Driver status reporting failure is logged but not fatal. */
                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Common unwind of the locks created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
909
910
911/**
912 * Deletes all the items in a wait chain.
913 * @param pList The head of the chain.
914 */
915static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
916{
917 while (!RTListIsEmpty(pList))
918 {
919 int rc2;
920 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
921 RTListNodeRemove(&pWait->ListNode);
922
923 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
924 pWait->Event = NIL_RTSEMEVENTMULTI;
925 pWait->pSession = NULL;
926 RTMemFree(pWait);
927 }
928}
929
930
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
977
978
979/**
980 * Creates a VBoxGuest user session.
981 *
982 * The native code calls this when a ring-3 client opens the device.
983 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
984 *
985 * @returns VBox status code.
986 * @param pDevExt The device extension.
987 * @param ppSession Where to store the session on success.
988 */
989int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
990{
991 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
992 if (RT_UNLIKELY(!pSession))
993 {
994 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
995 return VERR_NO_MEMORY;
996 }
997
998 pSession->Process = RTProcSelf();
999 pSession->R0Process = RTR0ProcHandleSelf();
1000 pSession->pDevExt = pDevExt;
1001
1002 *ppSession = pSession;
1003 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1004 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1005 return VINF_SUCCESS;
1006}
1007
1008
1009/**
1010 * Creates a VBoxGuest kernel session.
1011 *
1012 * The native code calls this when a ring-0 client connects to the device.
1013 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
1014 *
1015 * @returns VBox status code.
1016 * @param pDevExt The device extension.
1017 * @param ppSession Where to store the session on success.
1018 */
1019int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
1020{
1021 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1022 if (RT_UNLIKELY(!pSession))
1023 {
1024 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
1025 return VERR_NO_MEMORY;
1026 }
1027
1028 pSession->Process = NIL_RTPROCESS;
1029 pSession->R0Process = NIL_RTR0PROCESS;
1030 pSession->pDevExt = pDevExt;
1031
1032 *ppSession = pSession;
1033 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1034 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1035 return VINF_SUCCESS;
1036}
1037
1038static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession);
1039
/**
 * Closes a VBoxGuest session.
 *
 * Releases any capabilities acquired by the session, cancels its pending
 * event waits, disconnects its HGCM clients, cleans up balloon and mouse
 * status state, and finally frees the session structure.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

    /* Drop all capabilities this session acquired (0 or-mask, all-ones not-mask). */
    VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, 0, UINT32_MAX, VBOXGUESTCAPSACQUIRE_FLAGS_NONE);

    /* Wake up and cancel any waiters belonging to this session. */
    VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM client the session still has registered. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Invalidate the back-references before freeing. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    /* Reset any mouse status flags which the session may have set. */
    VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
    RTMemFree(pSession);
}
1077
1078
/**
 * Allocates a wait-for-event entry.
 *
 * Prefers recycling an entry from the device extension's free list;
 * falls back to allocating a fresh entry (with a new event semaphore)
 * when the free list is empty.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Optimistic unlocked peek at the free list; the result is only used
       to decide whether taking the spinlock is worthwhile. */
    PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
    if (pWait)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);

        /* Re-check under the lock; another thread may have emptied the list. */
        pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
        if (pWait)
            RTListNodeRemove(&pWait->ListNode);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    }
    if (!pWait)
    {
        /* Nothing recyclable: allocate and initialize a brand new entry.
           Log spam is rate limited to the first 32 failures. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }

        pWait->ListNode.pNext = NULL;
        pWait->ListNode.pPrev = NULL;
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    pWait->fPendingWakeUp = false;
    pWait->fFreeMe = false;
#endif
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    /* Make sure a recycled event semaphore starts out unsignalled. */
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1144
1145
/**
 * Frees the wait-for-event entry.
 *
 * The caller must own the wait spinlock !
 * The entry must be in a list!
 *
 * With deferred wake-ups enabled, an entry that a wake-up is currently in
 * flight for is only flagged (fFreeMe) and the actual move to the free list
 * is left to VBoxGuestWaitDoWakeUps.
 *
 * @param   pDevExt         The device extension.
 * @param   pWait           The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    Assert(!pWait->fFreeMe);
    if (pWait->fPendingWakeUp)
        /* A wake-up thread still holds a reference; defer the recycling. */
        pWait->fFreeMe = true;
    else
#endif
    {
        /* Unlink from whichever wait list it is on and recycle it. */
        RTListNodeRemove(&pWait->ListNode);
        RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
    }
}
1173
1174
/**
 * Frees the wait-for-event entry.
 *
 * Unlocked variant of VBoxGuestWaitFreeLocked: acquires the event spinlock
 * itself around the recycling.
 *
 * @param   pDevExt         The device extension.
 * @param   pWait           The wait-for-event entry to free.  Must be on a
 *                          wait list (see VBoxGuestWaitFreeLocked).
 */
static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
}
1187
1188
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 *
 * The spinlock is dropped while signalling each semaphore; the
 * fPendingWakeUp flag keeps the entry alive across that window and lets
 * VBoxGuestWaitFreeLocked hand the entry back to us via fFreeMe.
 *
 * @param   pDevExt         The device extension.
 */
void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness check: cheap fast path when nothing is pending. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int            rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Mark the entry busy, then drop the lock to signal the waiter
               (signalling with a spinlock held is not allowed). */
            pWait->fPendingWakeUp = true;
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                /* Normal case: move to the woken-up list for later freeing. */
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* The waiter tried to free the entry while we had the lock
                   dropped; complete the free on its behalf. */
                pWait->fFreeMe = false;
                VBoxGuestWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1232
1233
1234/**
1235 * Modifies the guest capabilities.
1236 *
1237 * Should be called during driver init and termination.
1238 *
1239 * @returns VBox status code.
1240 * @param fOr The Or mask (what to enable).
1241 * @param fNot The Not mask (what to disable).
1242 */
1243int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1244{
1245 VMMDevReqGuestCapabilities2 *pReq;
1246 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1247 if (RT_FAILURE(rc))
1248 {
1249 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1250 sizeof(*pReq), sizeof(*pReq), rc));
1251 return rc;
1252 }
1253
1254 pReq->u32OrMask = fOr;
1255 pReq->u32NotMask = fNot;
1256
1257 rc = VbglGRPerform(&pReq->header);
1258 if (RT_FAILURE(rc))
1259 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1260
1261 VbglGRFree(&pReq->header);
1262 return rc;
1263}
1264
1265
1266/**
1267 * Implements the fast (no input or output) type of IOCtls.
1268 *
1269 * This is currently just a placeholder stub inherited from the support driver code.
1270 *
1271 * @returns VBox status code.
1272 * @param iFunction The IOCtl function number.
1273 * @param pDevExt The device extension.
1274 * @param pSession The session.
1275 */
1276int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1277{
1278 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1279
1280 NOREF(iFunction);
1281 NOREF(pDevExt);
1282 NOREF(pSession);
1283 return VERR_NOT_SUPPORTED;
1284}
1285
1286
1287/**
1288 * Return the VMM device port.
1289 *
1290 * returns IPRT status code.
1291 * @param pDevExt The device extension.
1292 * @param pInfo The request info.
1293 * @param pcbDataReturned (out) contains the number of bytes to return.
1294 */
1295static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1296{
1297 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1298 pInfo->portAddress = pDevExt->IOPortBase;
1299 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1300 if (pcbDataReturned)
1301 *pcbDataReturned = sizeof(*pInfo);
1302 return VINF_SUCCESS;
1303}
1304
1305
#ifndef RT_OS_WINDOWS
/**
 * Set the callback for the kernel mouse handler.
 *
 * returns IPRT status code.
 * @param   pDevExt         The device extension.
 * @param   pNotify         The new callback information.
 * @note    The callback structure is copied under the event spinlock, and
 *          afterwards we busy-wait until no ISR is executing (cISR == 0) so
 *          that no interrupt handler can still be using the old callback
 *          data.  NOTE(review): an earlier comment here described a
 *          set-pointer-to-NULL-first protocol, which the code below does not
 *          implement - the spinlock + cISR spin is the actual mechanism.
 */
int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
{
    Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));

    /* Publish the new callback atomically w.r.t. the event spinlock. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pDevExt->MouseNotifyCallback = *pNotify;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /* Make sure no active ISR is referencing the old data - hacky but should be
     * effective. */
    while (pDevExt->cISR > 0)
        ASMNopPause();

    return VINF_SUCCESS;
}
#endif
1337
1338
1339/**
1340 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1341 *
1342 * The caller enters the spinlock, we leave it.
1343 *
1344 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1345 */
1346DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
1347 int iEvent, const uint32_t fReqEvents)
1348{
1349 uint32_t fMatches = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
1350 if (fMatches)
1351 {
1352 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1353
1354 pInfo->u32EventFlagsOut = fMatches;
1355 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1356 if (fReqEvents & ~((uint32_t)1 << iEvent))
1357 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1358 else
1359 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1360 return VINF_SUCCESS;
1361 }
1362 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1363 return VERR_TIMEOUT;
1364}
1365
1366
/**
 * Worker for the WAITEVENT I/O control: waits for one of the requested
 * VMMDev events to become pending, or until the timeout expires.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_INVALID_PARAMETER, VERR_NO_MEMORY, VERR_SEM_DESTROYED or
 *          VERR_INTERNAL_ERROR); pInfo->u32Result carries the corresponding
 *          VBOXGUEST_WAITEVENT_* code back to the caller.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The wait request (mask in, flags/result out).
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    /* Initialize the output up front so every early return is consistent. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    /* iEvent is the bit number of the lowest requested event; -1 means an
       empty (invalid) mask. */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    /* Note: WaitEventCheckCondition releases the spinlock on both paths. */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
        return rc;

    /* Zero timeout means poll-only: report timeout without sleeping. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    /* UINT32_MAX is the "wait forever" convention for u32TimeoutIn. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     */
    /* fResEvents == UINT32_MAX is the cancel marker set by
       VBoxGuestCommonIOCtl_CancelAllWaitEvents. */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        /* Waking up successfully with no events is an internal error
           (rate-limited log). */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1489
1490
/**
 * Cancels all pending WAITEVENT waits belonging to the given session.
 *
 * Each matching waiter is marked interrupted (fResEvents = UINT32_MAX,
 * which VBoxGuestCommonIOCtl_WaitEvent translates into VERR_INTERRUPTED)
 * and woken up.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            /* UINT32_MAX marks the wait as cancelled/interrupted. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            /* Signalling is deferred to VBoxGuestWaitDoWakeUps below
               (cannot signal with the spinlock held). */
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    Assert(rc == 0);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1526
1527/**
1528 * Checks if the VMM request is allowed in the context of the given session.
1529 *
1530 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1531 * @param pSession The calling session.
1532 * @param enmType The request type.
1533 * @param pReqHdr The request.
1534 */
1535static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1536 VMMDevRequestHeader const *pReqHdr)
1537{
1538 /*
1539 * Categorize the request being made.
1540 */
1541 /** @todo This need quite some more work! */
1542 enum
1543 {
1544 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1545 } enmRequired;
1546 switch (enmType)
1547 {
1548 /*
1549 * Deny access to anything we don't know or provide specialized I/O controls for.
1550 */
1551#ifdef VBOX_WITH_HGCM
1552 case VMMDevReq_HGCMConnect:
1553 case VMMDevReq_HGCMDisconnect:
1554# ifdef VBOX_WITH_64_BITS_GUESTS
1555 case VMMDevReq_HGCMCall32:
1556 case VMMDevReq_HGCMCall64:
1557# else
1558 case VMMDevReq_HGCMCall:
1559# endif /* VBOX_WITH_64_BITS_GUESTS */
1560 case VMMDevReq_HGCMCancel:
1561 case VMMDevReq_HGCMCancel2:
1562#endif /* VBOX_WITH_HGCM */
1563 default:
1564 enmRequired = kLevel_NoOne;
1565 break;
1566
1567 /*
1568 * There are a few things only this driver can do (and it doesn't use
1569 * the VMMRequst I/O control route anyway, but whatever).
1570 */
1571 case VMMDevReq_ReportGuestInfo:
1572 case VMMDevReq_ReportGuestInfo2:
1573 case VMMDevReq_GetHypervisorInfo:
1574 case VMMDevReq_SetHypervisorInfo:
1575 case VMMDevReq_RegisterPatchMemory:
1576 case VMMDevReq_DeregisterPatchMemory:
1577 case VMMDevReq_GetMemBalloonChangeRequest:
1578 enmRequired = kLevel_OnlyVBoxGuest;
1579 break;
1580
1581 /*
1582 * Trusted users apps only.
1583 */
1584 case VMMDevReq_QueryCredentials:
1585 case VMMDevReq_ReportCredentialsJudgement:
1586 case VMMDevReq_RegisterSharedModule:
1587 case VMMDevReq_UnregisterSharedModule:
1588 case VMMDevReq_WriteCoreDump:
1589 case VMMDevReq_GetCpuHotPlugRequest:
1590 case VMMDevReq_SetCpuHotPlugStatus:
1591 case VMMDevReq_CheckSharedModules:
1592 case VMMDevReq_GetPageSharingStatus:
1593 case VMMDevReq_DebugIsPageShared:
1594 case VMMDevReq_ReportGuestStats:
1595 case VMMDevReq_ReportGuestUserState:
1596 case VMMDevReq_GetStatisticsChangeRequest:
1597 case VMMDevReq_ChangeMemBalloon:
1598 enmRequired = kLevel_TrustedUsers;
1599 break;
1600
1601 /*
1602 * Anyone. But not for CapsAcquire mode
1603 */
1604 case VMMDevReq_SetGuestCapabilities:
1605 {
1606 VMMDevReqGuestCapabilities2 *pCaps = (VMMDevReqGuestCapabilities2*)pReqHdr;
1607 uint32_t fAcquireCaps = 0;
1608 if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, pCaps->u32OrMask, false, &fAcquireCaps))
1609 {
1610 AssertFailed();
1611 LogRel(("calling caps set for acquired caps %d\n", pCaps->u32OrMask));
1612 enmRequired = kLevel_NoOne;
1613 break;
1614 }
1615 /* hack to adjust the notcaps.
1616 * @todo: move to a better place
1617 * user-mode apps are allowed to pass any mask to the notmask,
1618 * the driver cleans up them accordingly */
1619 pCaps->u32NotMask &= ~fAcquireCaps;
1620 /* do not break, make it fall through to the below enmRequired setting */
1621 }
1622 /*
1623 * Anyone.
1624 */
1625 case VMMDevReq_GetMouseStatus:
1626 case VMMDevReq_SetMouseStatus:
1627 case VMMDevReq_SetPointerShape:
1628 case VMMDevReq_GetHostVersion:
1629 case VMMDevReq_Idle:
1630 case VMMDevReq_GetHostTime:
1631 case VMMDevReq_SetPowerStatus:
1632 case VMMDevReq_AcknowledgeEvents:
1633 case VMMDevReq_CtlGuestFilterMask:
1634 case VMMDevReq_ReportGuestStatus:
1635 case VMMDevReq_GetDisplayChangeRequest:
1636 case VMMDevReq_VideoModeSupported:
1637 case VMMDevReq_GetHeightReduction:
1638 case VMMDevReq_GetDisplayChangeRequest2:
1639 case VMMDevReq_VideoModeSupported2:
1640 case VMMDevReq_VideoAccelEnable:
1641 case VMMDevReq_VideoAccelFlush:
1642 case VMMDevReq_VideoSetVisibleRegion:
1643 case VMMDevReq_GetDisplayChangeRequestEx:
1644 case VMMDevReq_GetSeamlessChangeRequest:
1645 case VMMDevReq_GetVRDPChangeRequest:
1646 case VMMDevReq_LogString:
1647 case VMMDevReq_GetSessionId:
1648 enmRequired = kLevel_AllUsers;
1649 break;
1650
1651 /*
1652 * Depends on the request parameters...
1653 */
1654 /** @todo this have to be changed into an I/O control and the facilities
1655 * tracked in the session so they can automatically be failed when the
1656 * session terminates without reporting the new status.
1657 *
1658 * The information presented by IGuest is not reliable without this! */
1659 case VMMDevReq_ReportGuestCapabilities:
1660 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1661 {
1662 case VBoxGuestFacilityType_All:
1663 case VBoxGuestFacilityType_VBoxGuestDriver:
1664 enmRequired = kLevel_OnlyVBoxGuest;
1665 break;
1666 case VBoxGuestFacilityType_VBoxService:
1667 enmRequired = kLevel_TrustedUsers;
1668 break;
1669 case VBoxGuestFacilityType_VBoxTrayClient:
1670 case VBoxGuestFacilityType_Seamless:
1671 case VBoxGuestFacilityType_Graphics:
1672 default:
1673 enmRequired = kLevel_AllUsers;
1674 break;
1675 }
1676 break;
1677 }
1678
1679 /*
1680 * Check against the session.
1681 */
1682 switch (enmRequired)
1683 {
1684 default:
1685 case kLevel_NoOne:
1686 break;
1687 case kLevel_OnlyVBoxGuest:
1688 case kLevel_OnlyKernel:
1689 if (pSession->R0Process == NIL_RTR0PROCESS)
1690 return VINF_SUCCESS;
1691 break;
1692 case kLevel_TrustedUsers:
1693 case kLevel_AllUsers:
1694 return VINF_SUCCESS;
1695 }
1696
1697 return VERR_PERMISSION_DENIED;
1698}
1699
/**
 * Worker for the VMMREQUEST I/O control: validates a caller-supplied VMMDev
 * request, checks permissions, performs it via a physical-heap copy and
 * copies the result back.
 *
 * @returns VBox status code (validation/permission/allocation failure, the
 *          VbglGRPerform status, or the request's own status on host-side
 *          failure).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The caller's request buffer (in/out).
 * @param   cbData              Size of the caller's buffer.
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /* The declared size must be at least the minimum for the type and must
       fit inside the buffer the caller actually handed us. */
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce per-request-type privilege requirements. */
    rc = VBoxGuestCheckIfVMMReqAllowed(pDevExt, pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (   RT_SUCCESS(rc)
        && RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly host-updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request; propagate
           the request's own status code. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1783
1784
1785static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1786{
1787 VMMDevCtlGuestFilterMask *pReq;
1788 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1789 if (RT_FAILURE(rc))
1790 {
1791 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1792 sizeof(*pReq), sizeof(*pReq), rc));
1793 return rc;
1794 }
1795
1796 pReq->u32OrMask = pInfo->u32OrMask;
1797 pReq->u32NotMask = pInfo->u32NotMask;
1798 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1799 rc = VbglGRPerform(&pReq->header);
1800 if (RT_FAILURE(rc))
1801 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1802
1803 VbglGRFree(&pReq->header);
1804 return rc;
1805}
1806
1807#ifdef VBOX_WITH_HGCM
1808
1809AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1810
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks until the HGCM request is marked done (VBOX_HGCM_REQ_DONE) or the
 * wait fails/times out.  Handles out-of-memory for the wait entry by
 * polling (non-interruptible case only).
 *
 * @returns VBox status code (VINF_SUCCESS when the request completed;
 *          VERR_INTERRUPTED, VERR_TIMEOUT or VERR_SEM_DESTROYED otherwise).
 * @param   pHdr            The HGCM request header (volatile - updated by
 *                          the ISR/host).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Wait timeout in milliseconds.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        /* The done flag is read under the event spinlock, which is the
           lock the ISR side uses when completing HGCM requests. */
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of memory: back off briefly and retry rather than failing. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Semaphore destroyed means driver unload; bail out without touching
       the (possibly freed) wait entry bookkeeping. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1879
1880
1881/**
1882 * This is a callback for dealing with async waits.
1883 *
1884 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1885 */
1886static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1887{
1888 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1889 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1890 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1891 pDevExt,
1892 false /* fInterruptible */,
1893 u32User /* cMillies */);
1894}
1895
1896
1897/**
1898 * This is a callback for dealing with async waits with a timeout.
1899 *
1900 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1901 */
1902static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1903 void *pvUser, uint32_t u32User)
1904{
1905 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1906 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1907 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1908 pDevExt,
1909 true /* fInterruptible */,
1910 u32User /* cMillies */ );
1911
1912}
1913
1914
/**
 * Worker for the HGCM_CONNECT I/O control: connects the caller to an HGCM
 * service and records the new client id in the session.
 *
 * If the session's client id table is full, the fresh connection is rolled
 * back (disconnected) and VERR_TOO_MANY_OPEN_FILES is returned.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The connect request (in) / result+id (out).
 * @param   pcbDataReturned     Where to store the returned size. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        /* pInfo->result carries the host-side status; only record the client
           id when the host actually accepted the connection. */
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSpinlockAcquire(pDevExt->SessionSpinlock);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect so the host doesn't keep a
                   client the session cannot track (rate-limited log). */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1968
1969
/**
 * Handles VBOXGUEST_IOCTL_HGCM_DISCONNECT.
 *
 * Disconnects from an HGCM service, clearing the client ID from the
 * session's client ID table on success and restoring it on failure.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session (must own the client ID).
 * @param   pInfo           The disconnect request / result structure (in/out).
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
                                               size_t *pcbDataReturned)
{
    /*
     * Validate the client id and invalidate its entry while we're in the call.
     */
    int rc;
    const uint32_t u32ClientId = pInfo->u32ClientID;
    unsigned i;
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i] == u32ClientId)
        {
            /* UINT32_MAX marks the slot as "disconnect in progress". */
            pSession->aHGCMClientIds[i] = UINT32_MAX;
            break;
        }
    RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
    if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
    {
        static unsigned s_cErrors = 0;
        /* NOTE(review): '> 32' suppresses the FIRST 32 messages; the sibling
         * error paths use '< 32' to log the first 32 - possibly inverted. */
        if (s_cErrors++ > 32)
            LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
        return VERR_INVALID_HANDLE;
    }

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
    rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }

    /* Update the client id array according to the result: clear the slot on
     * success, restore the id so it stays usable on failure. */
    RTSpinlockAcquire(pDevExt->SessionSpinlock);
    if (pSession->aHGCMClientIds[i] == UINT32_MAX)
        pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
    RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);

    return rc;
}
2017
2018
2019static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
2020 PVBOXGUESTSESSION pSession,
2021 VBoxGuestHGCMCallInfo *pInfo,
2022 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
2023 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
2024{
2025 const uint32_t u32ClientId = pInfo->u32ClientID;
2026 uint32_t fFlags;
2027 size_t cbActual;
2028 unsigned i;
2029 int rc;
2030
2031 /*
2032 * Some more validations.
2033 */
2034 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
2035 {
2036 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
2037 return VERR_INVALID_PARAMETER;
2038 }
2039
2040 cbActual = cbExtra + sizeof(*pInfo);
2041#ifdef RT_ARCH_AMD64
2042 if (f32bit)
2043 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
2044 else
2045#endif
2046 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2047 if (cbData < cbActual)
2048 {
2049 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2050 cbData, cbActual));
2051 return VERR_INVALID_PARAMETER;
2052 }
2053
2054 /*
2055 * Validate the client id.
2056 */
2057 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2058 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2059 if (pSession->aHGCMClientIds[i] == u32ClientId)
2060 break;
2061 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2062 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2063 {
2064 static unsigned s_cErrors = 0;
2065 if (s_cErrors++ > 32)
2066 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2067 return VERR_INVALID_HANDLE;
2068 }
2069
2070 /*
2071 * The VbglHGCMCall call will invoke the callback if the HGCM
2072 * call is performed in an ASYNC fashion. This function can
2073 * deal with cancelled requests, so we let user more requests
2074 * be interruptible (should add a flag for this later I guess).
2075 */
2076 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2077 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2078#ifdef RT_ARCH_AMD64
2079 if (f32bit)
2080 {
2081 if (fInterruptible)
2082 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2083 else
2084 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2085 }
2086 else
2087#endif
2088 {
2089 if (fInterruptible)
2090 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2091 else
2092 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2093 }
2094 if (RT_SUCCESS(rc))
2095 {
2096 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2097 if (pcbDataReturned)
2098 *pcbDataReturned = cbActual;
2099 }
2100 else
2101 {
2102 if ( rc != VERR_INTERRUPTED
2103 && rc != VERR_TIMEOUT)
2104 {
2105 static unsigned s_cErrors = 0;
2106 if (s_cErrors++ < 32)
2107 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2108 }
2109 else
2110 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2111 }
2112 return rc;
2113}
2114
2115
2116#endif /* VBOX_WITH_HGCM */
2117
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly.  If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pInfo               The output buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can
 *                              be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        /* Ask the host for the currently requested balloon size. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The maximum chunk count reported by the host must not change once set. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3 = false;

                /* May set pInfo->fHandleInR3 if the kernel cannot do it itself. */
                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
2193
2194
2195/**
2196 * Handle a request for changing the memory balloon.
2197 *
2198 * @returns VBox status code.
2199 *
2200 * @param pDevExt The device extention.
2201 * @param pSession The session.
2202 * @param pInfo The change request structure (input).
2203 * @param pcbDataReturned Where to store the amount of returned data. Can
2204 * be NULL.
2205 */
2206static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2207 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2208{
2209 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2210 AssertRCReturn(rc, rc);
2211
2212 if (!pDevExt->MemBalloon.fUseKernelAPI)
2213 {
2214 /*
2215 * The first user trying to query/change the balloon becomes the
2216 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2217 */
2218 if ( pDevExt->MemBalloon.pOwner != pSession
2219 && pDevExt->MemBalloon.pOwner == NULL)
2220 pDevExt->MemBalloon.pOwner = pSession;
2221
2222 if (pDevExt->MemBalloon.pOwner == pSession)
2223 {
2224 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2225 if (pcbDataReturned)
2226 *pcbDataReturned = 0;
2227 }
2228 else
2229 rc = VERR_PERMISSION_DENIED;
2230 }
2231 else
2232 rc = VERR_PERMISSION_DENIED;
2233
2234 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2235 return rc;
2236}
2237
2238
2239/**
2240 * Handle a request for writing a core dump of the guest on the host.
2241 *
2242 * @returns VBox status code.
2243 *
2244 * @param pDevExt The device extension.
2245 * @param pInfo The output buffer.
2246 */
2247static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2248{
2249 VMMDevReqWriteCoreDump *pReq = NULL;
2250 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2251 if (RT_FAILURE(rc))
2252 {
2253 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2254 sizeof(*pReq), sizeof(*pReq), rc));
2255 return rc;
2256 }
2257
2258 pReq->fFlags = pInfo->fFlags;
2259 rc = VbglGRPerform(&pReq->header);
2260 if (RT_FAILURE(rc))
2261 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2262
2263 VbglGRFree(&pReq->header);
2264 return rc;
2265}
2266
2267
2268#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
/**
 * Enables the VRDP session and saves its session ID.
 *
 * @returns VBox status code (currently always VERR_NOT_IMPLEMENTED).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 */
/* NOTE(review): pDevExt is passed by value (VBOXGUESTDEVEXT) rather than by
 * pointer (PVBOXGUESTDEVEXT) - looks unintentional; confirm against callers
 * before changing the signature. */
static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Nothing to do here right now, since this only is supported on Windows at the moment. */
    return VERR_NOT_IMPLEMENTED;
}
2282
2283
/**
 * Disables the VRDP session.
 *
 * @returns VBox status code (currently always VERR_NOT_IMPLEMENTED).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 */
/* NOTE(review): pDevExt is passed by value (VBOXGUESTDEVEXT) rather than by
 * pointer (PVBOXGUESTDEVEXT) - looks unintentional; confirm against callers
 * before changing the signature. */
static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Nothing to do here right now, since this only is supported on Windows at the moment. */
    return VERR_NOT_IMPLEMENTED;
}
2297#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2298
#ifdef DEBUG
/** Unit test mode: when true, SetMouseStatus records the request via the
 * g_test_* variables below instead of really executing it. */
static bool g_test_fSetMouseStatus = false;
/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
static int g_test_SetMouseStatusGRRC;
/** When unit testing SetMouseStatus this will be set to the status passed to
 * the GR. */
static uint32_t g_test_statusSetMouseStatus;
#endif
2308
2309static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
2310{
2311 VMMDevReqMouseStatus *pReq;
2312 int rc;
2313
2314 LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
2315 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2316 if (RT_SUCCESS(rc))
2317 {
2318 pReq->mouseFeatures = fFeatures;
2319 pReq->pointerXPos = 0;
2320 pReq->pointerYPos = 0;
2321#ifdef DEBUG
2322 if (g_test_fSetMouseStatus)
2323 {
2324 g_test_statusSetMouseStatus = pReq->mouseFeatures;
2325 rc = g_test_SetMouseStatusGRRC;
2326 }
2327 else
2328#endif
2329 rc = VbglGRPerform(&pReq->header);
2330 VbglGRFree(&pReq->header);
2331 }
2332 LogRelFlowFunc(("rc=%Rrc\n", rc));
2333 return rc;
2334}
2335
2336
/**
 * Sets the mouse status features for this session and updates them
 * globally. We aim to ensure that if several threads call this in
 * parallel the most recent status will always end up being set.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 * @param   fFeatures   New bitmap of enabled features.
 */
static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
{
    uint32_t fNewDevExtStatus = 0;
    unsigned i;
    int rc;
    /* Exit early if nothing has changed - hack to work around the
     * Windows Additions not using the common code. */
    bool fNoAction;

    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /* For all the bits which the guest is allowed to set, check whether the
     * requested value is different to the current one and adjust the global
     * usage counter and if appropriate the global state if so. */
    for (i = 0; i < sizeof(fFeatures) * 8; i++)
    {
        if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
        {
            if (   (RT_BIT_32(i) & fFeatures)
                && !(RT_BIT_32(i) & pSession->fMouseStatus))
                pDevExt->acMouseFeatureUsage[i]++;
            else if (   !(RT_BIT_32(i) & fFeatures)
                     && (RT_BIT_32(i) & pSession->fMouseStatus))
                pDevExt->acMouseFeatureUsage[i]--;
        }
        /* A feature is globally enabled while at least one session uses it. */
        if (pDevExt->acMouseFeatureUsage[i] > 0)
            fNewDevExtStatus |= RT_BIT_32(i);
    }

    pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
    fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
    pDevExt->fMouseStatus = fNewDevExtStatus;

    RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
    if (fNoAction)
        return VINF_SUCCESS;

    /* Retry until the value we sent matches the current global state, so a
     * racing update by another thread is never lost (last writer wins).
     * NOTE(review): pDevExt->fMouseStatus is read here without the spinlock -
     * presumably intentional for the lock-free retry; verify. */
    do
    {
        fNewDevExtStatus = pDevExt->fMouseStatus;
        rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
    } while (   RT_SUCCESS(rc)
             && fNewDevExtStatus != pDevExt->fMouseStatus);

    return rc;
}
2394
2395
2396#ifdef DEBUG
2397/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2398 * the code in question it probably makes most sense to keep it next to the
2399 * code. */
2400static void testSetMouseStatus(void)
2401{
2402 uint32_t u32Data;
2403 int rc;
2404 RTSPINLOCK Spinlock;
2405
2406 g_test_fSetMouseStatus = true;
2407 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2408 AssertRCReturnVoid(rc);
2409 {
2410 VBOXGUESTDEVEXT DevExt = { 0 };
2411 VBOXGUESTSESSION Session = { 0 };
2412
2413 g_test_statusSetMouseStatus = ~0;
2414 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2415 DevExt.SessionSpinlock = Spinlock;
2416 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2417 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2418 &Session, &u32Data, sizeof(u32Data), NULL);
2419 AssertRCSuccess(rc);
2420 AssertMsg( g_test_statusSetMouseStatus
2421 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2422 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2423 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2424 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2425 &Session, &u32Data, sizeof(u32Data), NULL);
2426 AssertRCSuccess(rc);
2427 AssertMsg( g_test_statusSetMouseStatus
2428 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2429 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2430 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2431 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2432 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2433 &Session, &u32Data, sizeof(u32Data), NULL);
2434 AssertRCSuccess(rc);
2435 AssertMsg( g_test_statusSetMouseStatus
2436 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2437 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2438 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2439 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2440 &Session, &u32Data, sizeof(u32Data), NULL);
2441 AssertRCSuccess(rc);
2442 AssertMsg( g_test_statusSetMouseStatus
2443 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2444 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2445 u32Data = 0;
2446 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2447 &Session, &u32Data, sizeof(u32Data), NULL);
2448 AssertRCSuccess(rc);
2449 AssertMsg( g_test_statusSetMouseStatus
2450 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2451 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2452 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2453 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2454 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2455 /* This should succeed as the host request should not be made
2456 * since nothing has changed. */
2457 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2458 &Session, &u32Data, sizeof(u32Data), NULL);
2459 AssertRCSuccess(rc);
2460 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2461 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2462 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2463 &Session, &u32Data, sizeof(u32Data), NULL);
2464 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2465 /* Untested paths: out of memory; race setting status to host */
2466 }
2467 RTSpinlockDestroy(Spinlock);
2468 g_test_fSetMouseStatus = false;
2469}
2470#endif
2471
2472
2473/**
2474 * Guest backdoor logging.
2475 *
2476 * @returns VBox status code.
2477 *
2478 * @param pDevExt The device extension.
2479 * @param pch The log message (need not be NULL terminated).
2480 * @param cbData Size of the buffer.
2481 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2482 */
2483static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2484{
2485 NOREF(pch);
2486 NOREF(cbData);
2487 if (pDevExt->fLoggingEnabled)
2488 RTLogBackdoorPrintf("%.*s", cbData, pch);
2489 else
2490 Log(("%.*s", cbData, pch));
2491 if (pcbDataReturned)
2492 *pcbDataReturned = 0;
2493 return VINF_SUCCESS;
2494}
2495
2496static bool VBoxGuestCommonGuestCapsValidateValues(uint32_t fCaps)
2497{
2498 if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
2499 {
2500 LogRel(("VBoxGuestCommonGuestCapsValidateValues: invalid guest caps 0x%x\n", fCaps));
2501 return false;
2502 }
2503 return true;
2504}
2505
/**
 * Re-evaluates pending events against all waiting threads and wakes up any
 * waiter whose requested events are now both pending and handled by its
 * session.  Event bits consumed by a waiter are removed from the pending set;
 * the remainder is written back to f32PendingEvents.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session on whose behalf fGenFakeEvents are
 *                          generated (asserted to be the receiving session).
 * @param   fGenFakeEvents  Additional artificial event bits to dispatch.
 */
static void VBoxGuestCommonCheckEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fGenFakeEvents)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    uint32_t fEvents = fGenFakeEvents | pDevExt->f32PendingEvents;
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;

    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
        if (   (pWait->fReqEvents & fEvents & fHandledEvents)
            && !pWait->fResEvents)
        {
            pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
            /* Fake events must only reach the session they were generated for. */
            Assert(!(fGenFakeEvents & pWait->fResEvents) || pSession == pWait->pSession);
            fEvents &= ~pWait->fResEvents;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            int rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);
#endif
            /* All pending event bits consumed - no other waiter can match. */
            if (!fEvents)
                break;
        }
    }
    ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Perform the deferred wake-ups now that the spinlock is released. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif
}
2542
/**
 * Acquires and/or releases guest capabilities for a session ("acquire mode").
 *
 * Bits set in fOrMask are acquired for the calling session, failing with
 * VERR_RESOURCE_BUSY if another session already owns any of them.  Bits set
 * in fNotMask (and owned by this session) are released.  With the
 * VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE flag only the acquire-mode
 * configuration is recorded and no ownership changes are made.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   pSession    The calling session.
 * @param   fOrMask     Capabilities to acquire.
 * @param   fNotMask    Capabilities to release (bits also in fOrMask are ignored).
 * @param   enmFlags    VBOXGUESTCAPSACQUIRE_FLAGS_NONE or
 *                      VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE.
 */
static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask, VBOXGUESTCAPSACQUIRE_FLAGS enmFlags)
{
    uint32_t fSetCaps = 0;

    LogRel(("VBoxGuest: VBoxGuestCommonGuestCapsAcquire: pSession(0x%p), OR(0x%x), NOT(0x%x), flags(0x%x)\n", pSession, fOrMask, fNotMask, enmFlags));

    if (!VBoxGuestCommonGuestCapsValidateValues(fOrMask))
    {
        LogRel(("invalid fOrMask 0x%x\n", fOrMask));
        return VERR_INVALID_PARAMETER;
    }

    if (enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE
        && enmFlags != VBOXGUESTCAPSACQUIRE_FLAGS_NONE)
    {
        LogRel(("invalid enmFlags %d\n", enmFlags));
        return VERR_INVALID_PARAMETER;
    }
    if (!VBoxGuestCommonGuestCapsModeSet(pDevExt, fOrMask, true, &fSetCaps))
    {
        LogRel(("calling caps acquire for set caps %d\n", fOrMask));
        return VERR_INVALID_STATE;
    }

    if (enmFlags & VBOXGUESTCAPSACQUIRE_FLAGS_CONFIG_ACQUIRE_MODE)
    {
        /* Configuration-only request: the acquire-mode caps are recorded, done. */
        Log(("Configured Acquire caps: 0x%x\n", fOrMask));
        return VINF_SUCCESS;
    }

    /* The fNotMask need not contain only valid values;
     * invalid ones will simply be ignored. */
    uint32_t fCurrentOwnedCaps;
    uint32_t fSessionNotCaps;
    uint32_t fSessionOrCaps;
    uint32_t fOtherConflictingCaps;

    /* Acquiring wins over releasing when the same bit is in both masks. */
    fNotMask &= ~fOrMask;

    RTSpinlockAcquire(pDevExt->EventSpinlock);

    fCurrentOwnedCaps = pSession->u32AquiredGuestCaps;
    fSessionNotCaps = fCurrentOwnedCaps & fNotMask;
    fSessionOrCaps = fOrMask & ~fCurrentOwnedCaps;
    /* Caps set globally but not owned by this session conflict with the acquire. */
    fOtherConflictingCaps = pDevExt->u32GuestCaps & ~fCurrentOwnedCaps;
    fOtherConflictingCaps &= fSessionOrCaps;

    if (!fOtherConflictingCaps)
    {
        if (fSessionOrCaps)
        {
            pSession->u32AquiredGuestCaps |= fSessionOrCaps;
            pDevExt->u32GuestCaps |= fSessionOrCaps;
        }

        if (fSessionNotCaps)
        {
            pSession->u32AquiredGuestCaps &= ~fSessionNotCaps;
            pDevExt->u32GuestCaps &= ~fSessionNotCaps;
        }
    }

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (fOtherConflictingCaps)
    {
        Log(("VBoxGuest: Caps 0x%x were busy\n", fOtherConflictingCaps));
        return VERR_RESOURCE_BUSY;
    }

    /* now do host notification outside the lock */
    if (!fSessionOrCaps && !fSessionNotCaps)
    {
        /* no changes, return */
        return VINF_SUCCESS;
    }

    int rc = VBoxGuestSetGuestCapabilities(fSessionOrCaps, fSessionNotCaps);
    if (!RT_SUCCESS(rc))
    {
        LogRel(("VBoxGuest: VBoxGuestCommonGuestCapsAcquire: VBoxGuestSetGuestCapabilities failed, rc %d\n", rc));

        /* Failure branch:
         * This is generally bad since e.g. failure to release the caps may result
         * in other sessions not being able to use them, so we are not trying to
         * restore the caps back to their values before the
         * VBoxGuestCommonGuestCapsAcquire call, but just pretend everything is OK.
         * @todo: better failure handling mechanism? */
    }

    /* success! */
    uint32_t fGenFakeEvents = 0;

    if (fSessionOrCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
    {
        /* Generate the seamless change event so that the ring-3 app can sync with
         * the seamless state.  Although this introduces a false alarm for the ring-3
         * client, it still solves the problem of client state inconsistency in
         * multiuser environments. */
        fGenFakeEvents |= VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
    }

    /* Since the acquire filter mask has changed, we need to process events anyway
     * to ensure they move from the pending events field to the proper
     * (un-filtered) entries. */
    VBoxGuestCommonCheckEvents(pDevExt, pSession, fGenFakeEvents);

    return VINF_SUCCESS;
}
2649
2650static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
2651{
2652 int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask, pAcquire->enmFlags);
2653 if (!RT_SUCCESS(rc))
2654 LogRel(("VBoxGuestCommonGuestCapsAcquire: failed rc %d\n", rc));
2655 pAcquire->rc = rc;
2656 return VINF_SUCCESS;
2657}
2658
2659
2660/**
2661 * Common IOCtl for user to kernel and kernel to kernel communication.
2662 *
2663 * This function only does the basic validation and then invokes
2664 * worker functions that takes care of each specific function.
2665 *
2666 * @returns VBox status code.
2667 *
2668 * @param iFunction The requested function.
2669 * @param pDevExt The device extension.
2670 * @param pSession The client session.
2671 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2672 * @param cbData The max size of the data buffer.
2673 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2674 */
2675int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2676 void *pvData, size_t cbData, size_t *pcbDataReturned)
2677{
2678 int rc;
2679 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2680 iFunction, pDevExt, pSession, pvData, cbData));
2681
2682 /*
2683 * Make sure the returned data size is set to zero.
2684 */
2685 if (pcbDataReturned)
2686 *pcbDataReturned = 0;
2687
2688 /*
2689 * Define some helper macros to simplify validation.
2690 */
2691#define CHECKRET_RING0(mnemonic) \
2692 do { \
2693 if (pSession->R0Process != NIL_RTR0PROCESS) \
2694 { \
2695 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2696 pSession->Process, (uintptr_t)pSession->R0Process)); \
2697 return VERR_PERMISSION_DENIED; \
2698 } \
2699 } while (0)
2700#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2701 do { \
2702 if (cbData < (cbMin)) \
2703 { \
2704 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2705 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2706 return VERR_BUFFER_OVERFLOW; \
2707 } \
2708 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2709 { \
2710 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2711 return VERR_INVALID_POINTER; \
2712 } \
2713 } while (0)
2714#define CHECKRET_SIZE(mnemonic, cb) \
2715 do { \
2716 if (cbData != (cb)) \
2717 { \
2718 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2719 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2720 return VERR_BUFFER_OVERFLOW; \
2721 } \
2722 if ((cb) != 0 && !VALID_PTR(pvData)) \
2723 { \
2724 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2725 return VERR_INVALID_POINTER; \
2726 } \
2727 } while (0)
2728
2729
2730 /*
2731 * Deal with variably sized requests first.
2732 */
2733 rc = VINF_SUCCESS;
2734 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2735 {
2736 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2737 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2738 }
2739#ifdef VBOX_WITH_HGCM
2740 /*
2741 * These ones are a bit tricky.
2742 */
2743 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2744 {
2745 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2746 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2747 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2748 fInterruptible, false /*f32bit*/, false /* fUserData */,
2749 0, cbData, pcbDataReturned);
2750 }
2751 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2752 {
2753 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2754 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2755 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2756 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2757 false /*f32bit*/, false /* fUserData */,
2758 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2759 }
    /* HGCM call variant whose parameter payload resides in user memory
       (fUserData = true).  Hardcoded as interruptible — presumably because
       this ioctl only originates from user mode; TODO confirm. */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
    {
        bool fInterruptible = true;
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                           fInterruptible, false /*f32bit*/, true /* fUserData */,
                                           0, cbData, pcbDataReturned);
    }
# ifdef RT_ARCH_AMD64
    /* HGCM call from a 32-bit client on a 64-bit kernel. */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
    {
        /* Only user-mode sessions (R0Process != NIL) may be interrupted. */
        bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
        CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
                                           fInterruptible, true /*f32bit*/, false /* fUserData */,
                                           0, cbData, pcbDataReturned);
    }
    /* Timed 32-bit HGCM call: the call info is wrapped in a structure that
       supplies a timeout and an interruptibility flag of its own. */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
    {
        CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
        VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
        rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
                                           !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
                                           true /*f32bit*/, false /* fUserData */,
                                           RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
    }
# endif
#endif /* VBOX_WITH_HGCM */
    /* Log request: pvData is the (not necessarily terminated) message text. */
    else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
    {
        CHECKRET_MIN_SIZE("LOG", 1);
        rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
    }
    /*
     * The remaining requests carry fixed function numbers (no size encoded in
     * iFunction), so they are dispatched with a plain switch.
     */
    else
    {
        switch (iFunction)
        {
            case VBOXGUEST_IOCTL_GETVMMDEVPORT:
                CHECKRET_RING0("GETVMMDEVPORT");
                CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
                rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
                break;

#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
            case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
                CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
                CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
                rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
                break;
#endif

            case VBOXGUEST_IOCTL_WAITEVENT:
                /* Interruptible only for user-mode sessions (last argument). */
                CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
                rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
                                                    pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
                break;

            case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
                /* FIXME(review): the VERR_INVALID_PARAMETER status below is
                   unconditionally overwritten by the next assignment — a
                   'break' (or 'else') is missing, so the cbData check has no
                   effect and over-sized requests are silently accepted. */
                if (cbData != 0)
                    rc = VERR_INVALID_PARAMETER;
                rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
                break;

            case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
                CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
                rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
                break;

#ifdef VBOX_WITH_HGCM
            /* The 32-bit and 64-bit connect/disconnect structures are handled
               by the same worker. */
            case VBOXGUEST_IOCTL_HGCM_CONNECT:
# ifdef RT_ARCH_AMD64
            case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
# endif
                CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
                rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
# ifdef RT_ARCH_AMD64
            case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
# endif
                CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
                rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
                break;
#endif /* VBOX_WITH_HGCM */

            case VBOXGUEST_IOCTL_CHECK_BALLOON:
                CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
                rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_CHANGE_BALLOON:
                CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
                rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
                break;

            case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
                CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
                rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
                break;

#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
            /* No size checks: these two take no payload. */
            case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
                rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
                break;

            case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
                rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
                break;
#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
            case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
                CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
                rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
                                                         *(uint32_t *)pvData);
                break;

#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
            case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
                CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
                rc = VbgdNtIOCtl_DpcLatencyChecker();
                break;
#endif

            case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
                CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
                rc = VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire*)pvData);
                /* NOTE(review): pcbDataReturned is dereferenced here without a
                   NULL check, yet the Log statement at the end of this function
                   guards against it being NULL — potential NULL dereference. */
                *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
                break;

            default:
            {
                /* Unknown request: log both the raw and the size-stripped
                   function number to ease diagnosing mismatched headers. */
                LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
                        VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
                rc = VERR_NOT_SUPPORTED;
                break;
            }
        }
    }

    Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
    return rc;
}
2902
2903
2904
2905/**
2906 * Common interrupt service routine.
2907 *
2908 * This deals with events and with waking up thread waiting for those events.
2909 *
2910 * @returns true if it was our interrupt, false if it wasn't.
2911 * @param pDevExt The VBoxGuest device extension.
2912 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
#ifndef RT_OS_WINDOWS
    /* Snapshot of the mouse-notify callback; copied under the spinlock and
       invoked only after the lock has been dropped. */
    VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
#endif
    bool fMousePositionChanged = false;
    /* Preallocated IRQ-acknowledge request shared with the host (volatile:
       the host writes it while we poll it below). */
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;            /* accumulates (ORs) semaphore-signal statuses */
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock, increase the ISR count and check if it's our IRQ or
     * not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    ASMAtomicIncU32(&pDevExt->cISR);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         * Instead the physical address of the ack request is written straight
         * to the VMMDev request port; the host fills in pReq->events and
         * pReq->header.rc.  The compiler barriers keep the initialization of
         * the request and the reading of the result on either side of the
         * port write.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;
            PVBOXGUESTWAIT pSafe;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
#ifndef RT_OS_WINDOWS
                MouseNotifyCallback = pDevExt->MouseNotifyCallback;
#endif
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host marked DONE is moved to a
             * wake-up list (deferred or immediate, see below).
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        /* Signalling is not possible at this IRQL; queue the
                           waiter for VBoxGuestWaitDoWakeUps further down. */
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             * Merge in previously undelivered events; each waiter consumes
             * the events it asked for (and is allowed to see), and whatever
             * remains unclaimed is stored back as pending.
             */
            fEvents |= pDevExt->f32PendingEvents;
            RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
            {
                uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
                if (   (pWait->fReqEvents & fEvents & fHandledEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
                    fEvents &= ~pWait->fResEvents;
                    RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                    RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                    rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                    if (!fEvents)
                        break; /* all events claimed, nothing left to hand out */
                }
            }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     * care of it.
     */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
#ifndef RT_OS_WINDOWS
        if (MouseNotifyCallback.pfnNotify)
            MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
#endif
    }

    ASMAtomicDecU32(&pDevExt->cISR);
    /* All RTSemEventMultiSignal calls above are expected to succeed. */
    Assert(rc == 0);
    return fOurIrq;
}
3052
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette