VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 45760

Last change on this file since 45760 was 45760, checked in by vboxsync, 12 years ago

VBoxTray/VBoxGuest: proper guest caps acquisition & cleanup (to be continued)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 104.6 KB
1/* $Id: VBoxGuest.cpp 45760 2013-04-26 07:40:05Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS) || defined(RT_OS_DARWIN)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68#ifdef DEBUG
69static void testSetMouseStatus(void);
70#endif
71static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
72
73static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask);
74
75DECLINLINE(uint32_t) VBoxGuestCommonGetHandledEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
76{
77    if (!pDevExt->u32GuestCapsAcquireMode)
78 return VMMDEV_EVENT_VALID_EVENT_MASK;
79
80 uint32_t u32AquiredGuestCaps = pSession->u32AquiredGuestCaps;
81    /* Only VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST is filtered for now.
82     * We let VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST come through so that it is still
83     * propagated to the ring-3 client; for simplicity, clients deal with seamless validity on their own. */
84 uint32_t u32CleanupEvents = VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
85 if (u32AquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)
86 u32CleanupEvents &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST;
87 if (u32AquiredGuestCaps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)
88 u32CleanupEvents &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST;
89
90 return VMMDEV_EVENT_VALID_EVENT_MASK & ~u32CleanupEvents;
91}
92
93DECLINLINE(uint32_t) VBoxGuestCommonGetAndCleanPendingEventsLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
94{
95 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents & VBoxGuestCommonGetHandledEventsLocked(pDevExt, pSession);
96 if (fMatches)
97 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
98 return fMatches;
99}
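/* A minimal sketch of how the two locked helpers above are meant to be used
 * together (kept out of the build with "#if 0"; the real callers are the
 * wait-event and ISR paths further down in this file). */
#if 0
static uint32_t vboxGuestExampleGrabEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fReqEvents)
{
    uint32_t fGot;
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    /* Only events the session is allowed to handle are returned and cleared. */
    fGot = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    return fGot;
}
#endif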
100
101/*******************************************************************************
102* Global Variables *
103*******************************************************************************/
104static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
105
106#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
107/**
108 * Drag in the rest of IPRT since we share it with the
109 * rest of the kernel modules on Solaris.
110 */
111PFNRT g_apfnVBoxGuestIPRTDeps[] =
112{
113 /* VirtioNet */
114 (PFNRT)RTRandBytes,
115 /* RTSemMutex* */
116 (PFNRT)RTSemMutexCreate,
117 (PFNRT)RTSemMutexDestroy,
118 (PFNRT)RTSemMutexRequest,
119 (PFNRT)RTSemMutexRequestNoResume,
120 (PFNRT)RTSemMutexRequestDebug,
121 (PFNRT)RTSemMutexRequestNoResumeDebug,
122 (PFNRT)RTSemMutexRelease,
123 (PFNRT)RTSemMutexIsOwned,
124 NULL
125};
126#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
127
128
129/**
130 * Reserves memory in which the VMM can relocate any guest mappings
131 * that are floating around.
132 *
133 * This operation is a little bit tricky since the VMM might not accept
134 * just any address because of address clashes between the three contexts
135 * it operates in, so use a small stack to perform this operation.
136 *
137 * @returns VBox status code (ignored).
138 * @param pDevExt The device extension.
139 */
140static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
141{
142 /*
143 * Query the required space.
144 */
145 VMMDevReqHypervisorInfo *pReq;
146 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
147 if (RT_FAILURE(rc))
148 return rc;
149 pReq->hypervisorStart = 0;
150 pReq->hypervisorSize = 0;
151 rc = VbglGRPerform(&pReq->header);
152 if (RT_FAILURE(rc)) /* this shouldn't happen! */
153 {
154 VbglGRFree(&pReq->header);
155 return rc;
156 }
157
158 /*
159 * The VMM will report back if there is nothing it wants to map, like for
160 * instance in VT-x and AMD-V mode.
161 */
162 if (pReq->hypervisorSize == 0)
163 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
164 else
165 {
166 /*
167 * We have to try several times since the host can be picky
168 * about certain addresses.
169 */
170 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
171 uint32_t cbHypervisor = pReq->hypervisorSize;
172 RTR0MEMOBJ ahTries[5];
173 uint32_t iTry;
174 bool fBitched = false;
175 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
176 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
177 {
178 /*
179             * Reserve space, or if that isn't supported, create an object for
180             * some fictive physical memory and map that into kernel space.
181 *
182 * To make the code a bit uglier, most systems cannot help with
183 * 4MB alignment, so we have to deal with that in addition to
184 * having two ways of getting the memory.
185 */
186 uint32_t uAlignment = _4M;
187 RTR0MEMOBJ hObj;
188 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
189 if (rc == VERR_NOT_SUPPORTED)
190 {
191 uAlignment = PAGE_SIZE;
192 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
193 }
194 /*
195             * If both RTR0MemObjReserveKernel calls above failed because the operation is either not supported or
196             * not implemented at all on the current platform, try to map the memory object into the
197 * virtual kernel space.
198 */
199 if (rc == VERR_NOT_SUPPORTED)
200 {
201 if (hFictive == NIL_RTR0MEMOBJ)
202 {
203 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
204 if (RT_FAILURE(rc))
205 break;
206 hFictive = hObj;
207 }
208 uAlignment = _4M;
209 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
210 if (rc == VERR_NOT_SUPPORTED)
211 {
212 uAlignment = PAGE_SIZE;
213 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
214 }
215 }
216 if (RT_FAILURE(rc))
217 {
218 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
219 rc, cbHypervisor, uAlignment, iTry));
220 fBitched = true;
221 break;
222 }
223
224 /*
225 * Try set it.
226 */
227 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
228 pReq->header.rc = VERR_INTERNAL_ERROR;
229 pReq->hypervisorSize = cbHypervisor;
230 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
231 if ( uAlignment == PAGE_SIZE
232 && pReq->hypervisorStart & (_4M - 1))
233 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
234 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
235
236 rc = VbglGRPerform(&pReq->header);
237 if (RT_SUCCESS(rc))
238 {
239 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
240 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
241 RTR0MemObjAddress(pDevExt->hGuestMappings),
242 RTR0MemObjSize(pDevExt->hGuestMappings),
243 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
244 break;
245 }
246 ahTries[iTry] = hObj;
247 }
248
249 /*
250 * Cleanup failed attempts.
251 */
252 while (iTry-- > 0)
253 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
254 if ( RT_FAILURE(rc)
255 && hFictive != NIL_RTR0PTR)
256 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
257 if (RT_FAILURE(rc) && !fBitched)
258 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
259 }
260 VbglGRFree(&pReq->header);
261
262 /*
263 * We ignore failed attempts for now.
264 */
265 return VINF_SUCCESS;
266}
267
268
269/**
270 * Undo what vboxGuestInitFixateGuestMappings did.
271 *
272 * @param pDevExt The device extension.
273 */
274static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
275{
276 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
277 {
278 /*
279 * Tell the host that we're going to free the memory we reserved for
280         * it, then free it up. (Leak the memory if anything goes wrong here.)
281 */
282 VMMDevReqHypervisorInfo *pReq;
283 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
284 if (RT_SUCCESS(rc))
285 {
286 pReq->hypervisorStart = 0;
287 pReq->hypervisorSize = 0;
288 rc = VbglGRPerform(&pReq->header);
289 VbglGRFree(&pReq->header);
290 }
291 if (RT_SUCCESS(rc))
292 {
293 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
294 AssertRC(rc);
295 }
296 else
297 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
298
299 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
300 }
301}
302
303
304/**
305 * Sets the interrupt filter mask during initialization and termination.
306 *
307 * This will ASSUME that we're the ones in charge of the mask, so
308 * we'll simply clear all bits we don't set.
309 *
310 * @returns VBox status code (ignored).
311 * @param pDevExt The device extension.
312 * @param fMask The new mask.
313 */
314static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
315{
316 VMMDevCtlGuestFilterMask *pReq;
317 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
318 if (RT_SUCCESS(rc))
319 {
320 pReq->u32OrMask = fMask;
321 pReq->u32NotMask = ~fMask;
322 rc = VbglGRPerform(&pReq->header);
323 if (RT_FAILURE(rc))
324 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
325 VbglGRFree(&pReq->header);
326 }
327 return rc;
328}
329
330
331/**
332 * Inflate the balloon by one chunk represented by an R0 memory object.
333 *
334 * The caller owns the balloon mutex.
335 *
336 * @returns IPRT status code.
337 * @param pMemObj Pointer to the R0 memory object.
338 * @param pReq The pre-allocated request for performing the VMMDev call.
339 */
340static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
341{
342 uint32_t iPage;
343 int rc;
344
345 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
346 {
347 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
348 pReq->aPhysPage[iPage] = phys;
349 }
350
351 pReq->fInflate = true;
352 pReq->header.size = cbChangeMemBalloonReq;
353 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
354
355 rc = VbglGRPerform(&pReq->header);
356 if (RT_FAILURE(rc))
357 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
358 return rc;
359}
360
361
362/**
363 * Deflate the balloon by one chunk - inform the host and free the memory object.
364 *
365 * The caller owns the balloon mutex.
366 *
367 * @returns IPRT status code.
368 * @param pMemObj Pointer to the R0 memory object.
369 * The memory object will be freed afterwards.
370 * @param pReq The pre-allocated request for performing the VMMDev call.
371 */
372static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
373{
374 uint32_t iPage;
375 int rc;
376
377 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
378 {
379 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
380 pReq->aPhysPage[iPage] = phys;
381 }
382
383 pReq->fInflate = false;
384 pReq->header.size = cbChangeMemBalloonReq;
385 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
386
387 rc = VbglGRPerform(&pReq->header);
388 if (RT_FAILURE(rc))
389 {
390 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
391 return rc;
392 }
393
394 rc = RTR0MemObjFree(*pMemObj, true);
395 if (RT_FAILURE(rc))
396 {
397 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
398 return rc;
399 }
400
401 *pMemObj = NIL_RTR0MEMOBJ;
402 return VINF_SUCCESS;
403}
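/* A minimal sketch of one inflate/deflate cycle for a single balloon chunk
 * (kept out of the build with "#if 0"; this is essentially what
 * vboxGuestSetBalloonSizeKernel below does per chunk, and the real code holds
 * pDevExt->MemBalloon.hMtx while doing it). */
#if 0
static int vboxGuestExampleBalloonOneChunk(void)
{
    VMMDevChangeMemBalloon *pReq;
    RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    /* Allocate one chunk of non-contiguous physical pages and hand it to the host. */
    rc = RTR0MemObjAllocPhysNC(&hMemObj, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
    if (RT_SUCCESS(rc))
    {
        rc = vboxGuestBalloonInflate(&hMemObj, pReq);
        if (RT_SUCCESS(rc))
            rc = vboxGuestBalloonDeflate(&hMemObj, pReq); /* frees hMemObj on success */
        else
            RTR0MemObjFree(hMemObj, true /* fFreeMappings */);
    }
    VbglGRFree(&pReq->header);
    return rc;
}
#endif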
404
405
406/**
407 * Inflate/deflate the memory balloon and notify the host.
408 *
409 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
410 * the mutex.
411 *
412 * @returns VBox status code.
413 * @param pDevExt The device extension.
415 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
416 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
417 * (VINF_SUCCESS if set).
418 */
419static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
420{
421 int rc = VINF_SUCCESS;
422
423 if (pDevExt->MemBalloon.fUseKernelAPI)
424 {
425 VMMDevChangeMemBalloon *pReq;
426 uint32_t i;
427
428 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
429 {
430 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
431 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
432 return VERR_INVALID_PARAMETER;
433 }
434
435         if (cBalloonChunks == pDevExt->MemBalloon.cChunks)
436 return VINF_SUCCESS; /* nothing to do */
437
438 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
439 && !pDevExt->MemBalloon.paMemObj)
440 {
441 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
442 if (!pDevExt->MemBalloon.paMemObj)
443 {
444 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
445 return VERR_NO_MEMORY;
446 }
447 }
448
449 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
450 if (RT_FAILURE(rc))
451 return rc;
452
453 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
454 {
455 /* inflate */
456 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
457 {
458 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
459 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
460 if (RT_FAILURE(rc))
461 {
462 if (rc == VERR_NOT_SUPPORTED)
463 {
464 /* not supported -- fall back to the R3-allocated memory. */
465 rc = VINF_SUCCESS;
466 pDevExt->MemBalloon.fUseKernelAPI = false;
467 Assert(pDevExt->MemBalloon.cChunks == 0);
468 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
469 }
470 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
471 * cannot allocate more memory => don't try further, just stop here */
472 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
473 break;
474 }
475
476 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
477 if (RT_FAILURE(rc))
478 {
479 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
480 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
481 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
482 break;
483 }
484 pDevExt->MemBalloon.cChunks++;
485 }
486 }
487 else
488 {
489 /* deflate */
490 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
491 {
492 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
493 if (RT_FAILURE(rc))
494 {
495 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
496 break;
497 }
498 pDevExt->MemBalloon.cChunks--;
499 }
500 }
501
502 VbglGRFree(&pReq->header);
503 }
504
505 /*
506      * Set the handle-in-ring3 indicator.  When set, Ring-3 will have to perform
507      * the balloon changes via the other API.
508 */
509 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
510
511 return rc;
512}
513
514
515/**
516 * Helper to reinit the VBoxVMM communication after hibernation.
517 *
518 * @returns VBox status code.
519 * @param pDevExt The device extension.
520 * @param enmOSType The OS type.
521 */
522int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
523{
524 int rc = VBoxGuestReportGuestInfo(enmOSType);
525 if (RT_SUCCESS(rc))
526 {
527 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
528 if (RT_FAILURE(rc))
529 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
530 }
531 else
532 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
533 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
534 return rc;
535}
536
537
538/**
539 * Inflate/deflate the balloon by one chunk.
540 *
541 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
542 *
543 * @returns VBox status code.
544 * @param pDevExt The device extension.
545 * @param pSession The session.
546 * @param u64ChunkAddr The address of the chunk to add to / remove from the
547 * balloon.
548 * @param fInflate Inflate if true, deflate if false.
549 */
550static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
551 uint64_t u64ChunkAddr, bool fInflate)
552{
553 VMMDevChangeMemBalloon *pReq;
554 int rc = VINF_SUCCESS;
555 uint32_t i;
556 PRTR0MEMOBJ pMemObj = NULL;
557
558 if (fInflate)
559 {
560 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
561 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
562 {
563 LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
564 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
565 return VERR_INVALID_PARAMETER;
566 }
567
568 if (!pDevExt->MemBalloon.paMemObj)
569 {
570 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
571 if (!pDevExt->MemBalloon.paMemObj)
572 {
573 LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
574 return VERR_NO_MEMORY;
575 }
576 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
577 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
578 }
579 }
580 else
581 {
582 if (pDevExt->MemBalloon.cChunks == 0)
583 {
584 AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
585 return VERR_INVALID_PARAMETER;
586 }
587 }
588
589 /*
590 * Enumerate all memory objects and check if the object is already registered.
591 */
592 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
593 {
594 if ( fInflate
595 && !pMemObj
596 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
597 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
598 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
599 {
600 if (fInflate)
601 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
602 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
603 break;
604 }
605 }
606 if (!pMemObj)
607 {
608 if (fInflate)
609 {
610 /* no free object pointer found -- should not happen */
611 return VERR_NO_MEMORY;
612 }
613
614 /* cannot free this memory as it wasn't provided before */
615 return VERR_NOT_FOUND;
616 }
617
618 /*
619      * Try to inflate / deflate the balloon as requested.
620 */
621 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
622 if (RT_FAILURE(rc))
623 return rc;
624
625 if (fInflate)
626 {
627 rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
628 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
629 if (RT_SUCCESS(rc))
630 {
631 rc = vboxGuestBalloonInflate(pMemObj, pReq);
632 if (RT_SUCCESS(rc))
633 pDevExt->MemBalloon.cChunks++;
634 else
635 {
636 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
637 RTR0MemObjFree(*pMemObj, true);
638 *pMemObj = NIL_RTR0MEMOBJ;
639 }
640 }
641 }
642 else
643 {
644 rc = vboxGuestBalloonDeflate(pMemObj, pReq);
645 if (RT_SUCCESS(rc))
646 pDevExt->MemBalloon.cChunks--;
647 else
648 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
649 }
650
651 VbglGRFree(&pReq->header);
652 return rc;
653}
654
655
656/**
657 * Cleanup the memory balloon of a session.
658 *
659 * Will request the balloon mutex, so it must be valid and the caller must not
660 * own it already.
661 *
662 * @param pDevExt The device extension.
663 * @param pSession The session. Can be NULL at unload.
664 */
665static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
666{
667 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
668 if ( pDevExt->MemBalloon.pOwner == pSession
669 || pSession == NULL /*unload*/)
670 {
671 if (pDevExt->MemBalloon.paMemObj)
672 {
673 VMMDevChangeMemBalloon *pReq;
674 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
675 if (RT_SUCCESS(rc))
676 {
677 uint32_t i;
678 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
679 {
680 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
681 if (RT_FAILURE(rc))
682 {
683 LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
684 rc, pDevExt->MemBalloon.cChunks));
685 break;
686 }
687 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
688 pDevExt->MemBalloon.cChunks--;
689 }
690 VbglGRFree(&pReq->header);
691 }
692 else
693 LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
694 rc, pDevExt->MemBalloon.cChunks));
695 RTMemFree(pDevExt->MemBalloon.paMemObj);
696 pDevExt->MemBalloon.paMemObj = NULL;
697 }
698
699 pDevExt->MemBalloon.pOwner = NULL;
700 }
701 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
702}
703
704
705/**
706 * Initializes the VBoxGuest device extension when the
707 * device driver is loaded.
708 *
709 * The native code locates the VMMDev on the PCI bus and retrieves
710 * the MMIO and I/O port ranges; this function will take care of
711 * mapping the MMIO memory (if present). Upon successful return
712 * the native code should set up the interrupt handler.
713 *
714 * @returns VBox status code.
715 *
716 * @param pDevExt The device extension. Allocated by the native code.
717 * @param IOPortBase The base of the I/O port range.
718 * @param pvMMIOBase The base of the MMIO memory mapping.
719 * This is optional, pass NULL if not present.
720 * @param cbMMIO The size of the MMIO memory mapping.
721 * This is optional, pass 0 if not present.
722 * @param enmOSType The guest OS type to report to the VMMDev.
723 * @param fFixedEvents Events that will be enabled upon init and no client
724 * will ever be allowed to mask.
725 */
726int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
727 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
728{
729 int rc, rc2;
730 unsigned i;
731
732 /*
733 * Adjust fFixedEvents.
734 */
735#ifdef VBOX_WITH_HGCM
736 fFixedEvents |= VMMDEV_EVENT_HGCM;
737#endif
738
739 /*
740 * Initialize the data.
741 */
742 pDevExt->IOPortBase = IOPortBase;
743 pDevExt->pVMMDevMemory = NULL;
744 pDevExt->fFixedEvents = fFixedEvents;
745 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
746 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
747 pDevExt->pIrqAckEvents = NULL;
748 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
749 RTListInit(&pDevExt->WaitList);
750#ifdef VBOX_WITH_HGCM
751 RTListInit(&pDevExt->HGCMWaitList);
752#endif
753#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
754 RTListInit(&pDevExt->WakeUpList);
755#endif
756 RTListInit(&pDevExt->WokenUpList);
757 RTListInit(&pDevExt->FreeList);
758#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
759 pDevExt->fVRDPEnabled = false;
760#endif
761 pDevExt->fLoggingEnabled = false;
762 pDevExt->f32PendingEvents = 0;
763 pDevExt->u32MousePosChangedSeq = 0;
764 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
765 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
766 pDevExt->MemBalloon.cChunks = 0;
767 pDevExt->MemBalloon.cMaxChunks = 0;
768 pDevExt->MemBalloon.fUseKernelAPI = true;
769 pDevExt->MemBalloon.paMemObj = NULL;
770 pDevExt->MemBalloon.pOwner = NULL;
771 for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
772 pDevExt->acMouseFeatureUsage[i] = 0;
773 pDevExt->fMouseStatus = 0;
774 pDevExt->MouseNotifyCallback.pfnNotify = NULL;
775 pDevExt->MouseNotifyCallback.pvUser = NULL;
776 pDevExt->cISR = 0;
777
778 /*
779 * If there is an MMIO region validate the version and size.
780 */
781 if (pvMMIOBase)
782 {
783 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
784 Assert(cbMMIO);
785 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
786 && pVMMDev->u32Size >= 32
787 && pVMMDev->u32Size <= cbMMIO)
788 {
789 pDevExt->pVMMDevMemory = pVMMDev;
790 Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
791 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
792 }
793 else /* try live without it. */
794 LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
795 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
796 }
797
798 pDevExt->u32GuestCapsAcquireMode = 0;
799 pDevExt->u32GuestCaps = 0;
800
801 /*
802 * Create the wait and session spinlocks as well as the ballooning mutex.
803 */
804 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
805 if (RT_SUCCESS(rc))
806 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
807 if (RT_FAILURE(rc))
808 {
809 LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
810 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
811 RTSpinlockDestroy(pDevExt->EventSpinlock);
812 return rc;
813 }
814
815 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
816 if (RT_FAILURE(rc))
817 {
818 LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
819 RTSpinlockDestroy(pDevExt->SessionSpinlock);
820 RTSpinlockDestroy(pDevExt->EventSpinlock);
821 return rc;
822 }
823
824 /*
825 * Initialize the guest library and report the guest info back to VMMDev,
826 * set the interrupt control filter mask, and fixate the guest mappings
827 * made by the VMM.
828 */
829 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
830 if (RT_SUCCESS(rc))
831 {
832 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
833 if (RT_SUCCESS(rc))
834 {
835 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
836 Assert(pDevExt->PhysIrqAckEvents != 0);
837
838 rc = VBoxGuestReportGuestInfo(enmOSType);
839 if (RT_SUCCESS(rc))
840 {
841 rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
842 if (RT_SUCCESS(rc))
843 {
844 /*
845 * Disable guest graphics capability by default. The guest specific
846 * graphics driver will re-enable this when it is necessary.
847 */
848 rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
849 if (RT_SUCCESS(rc))
850 {
851 vboxGuestInitFixateGuestMappings(pDevExt);
852
853#ifdef DEBUG
854 testSetMouseStatus(); /* Other tests? */
855#endif
856
857 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
858 if (RT_FAILURE(rc))
859 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
860
861 Log(("VBoxGuestInitDevExt: returns success\n"));
862 return VINF_SUCCESS;
863 }
864
865 LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
866 }
867 else
868 LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
869 }
870 else
871 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
872 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
873 }
874 else
875 LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));
876
877 VbglTerminate();
878 }
879 else
880 LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
881
882 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
883 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
884 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
885 return rc; /* (failed) */
886}
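/* A minimal sketch of the call order expected from the native (OS specific)
 * code around VBoxGuestInitDevExt (kept out of the build with "#if 0"; the
 * PCI probing and interrupt plumbing indicated by the comments are the native
 * port's job, and VBOXOSTYPE_Linux26 is just an example OS type). */
#if 0
static int vboxGuestExampleNativeAttach(PVBOXGUESTDEVEXT pDevExt)
{
    /* 1. Locate the VMMDev PCI device and query its I/O port and (optional) MMIO range. */
    uint16_t IOPortBase = 0;     /* from the PCI I/O BAR */
    void    *pvMMIOBase = NULL;  /* from the PCI MMIO BAR, may be absent */
    uint32_t cbMMIO     = 0;

    /* 2. Let the common code map the MMIO, report guest info and fixate the mappings. */
    int rc = VBoxGuestInitDevExt(pDevExt, IOPortBase, pvMMIOBase, cbMMIO,
                                 VBOXOSTYPE_Linux26, 0 /* fFixedEvents */);
    if (RT_FAILURE(rc))
        return rc;

    /* 3. Only now install the interrupt handler which dispatches to the common ISR. */
    return VINF_SUCCESS;
}
#endif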
887
888
889/**
890 * Deletes all the items in a wait chain.
891 * @param pList The head of the chain.
892 */
893static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
894{
895 while (!RTListIsEmpty(pList))
896 {
897 int rc2;
898 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
899 RTListNodeRemove(&pWait->ListNode);
900
901 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
902 pWait->Event = NIL_RTSEMEVENTMULTI;
903 pWait->pSession = NULL;
904 RTMemFree(pWait);
905 }
906}
907
908
909/**
910 * Destroys the VBoxGuest device extension.
911 *
912 * The native code should call this before the driver is unloaded,
913 * but don't call this on shutdown.
914 *
915 * @param pDevExt The device extension.
916 */
917void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
918{
919 int rc2;
920 Log(("VBoxGuestDeleteDevExt:\n"));
921 Log(("VBoxGuest: The additions driver is terminating.\n"));
922
923 /*
924 * Clean up the bits that involves the host first.
925 */
926 vboxGuestTermUnfixGuestMappings(pDevExt);
927 VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
928 vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
929 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
930
931 /*
932 * Cleanup all the other resources.
933 */
934 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
935 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
936 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
937
938 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
939#ifdef VBOX_WITH_HGCM
940 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
941#endif
942#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
943 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
944#endif
945 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
946 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
947
948 VbglTerminate();
949
950 pDevExt->pVMMDevMemory = NULL;
951
952 pDevExt->IOPortBase = 0;
953 pDevExt->pIrqAckEvents = NULL;
954}
955
956
957/**
958 * Creates a VBoxGuest user session.
959 *
960 * The native code calls this when a ring-3 client opens the device.
961 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
962 *
963 * @returns VBox status code.
964 * @param pDevExt The device extension.
965 * @param ppSession Where to store the session on success.
966 */
967int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
968{
969 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
970 if (RT_UNLIKELY(!pSession))
971 {
972 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
973 return VERR_NO_MEMORY;
974 }
975
976 pSession->Process = RTProcSelf();
977 pSession->R0Process = RTR0ProcHandleSelf();
978 pSession->pDevExt = pDevExt;
979
980 *ppSession = pSession;
981 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
982 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
983 return VINF_SUCCESS;
984}
985
986
987/**
988 * Creates a VBoxGuest kernel session.
989 *
990 * The native code calls this when a ring-0 client connects to the device.
991 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
992 *
993 * @returns VBox status code.
994 * @param pDevExt The device extension.
995 * @param ppSession Where to store the session on success.
996 */
997int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
998{
999 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
1000 if (RT_UNLIKELY(!pSession))
1001 {
1002 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
1003 return VERR_NO_MEMORY;
1004 }
1005
1006 pSession->Process = NIL_RTPROCESS;
1007 pSession->R0Process = NIL_RTR0PROCESS;
1008 pSession->pDevExt = pDevExt;
1009
1010 *ppSession = pSession;
1011 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1012 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1013 return VINF_SUCCESS;
1014}
1015
1016
1017
1018/**
1019 * Closes a VBoxGuest session.
1020 *
1021 * @param pDevExt The device extension.
1022 * @param pSession The session to close (and free).
1023 */
1024void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1025{
1026 unsigned i; NOREF(i);
1027 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1028 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1029
1030 VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, 0, UINT32_MAX);
1031
1032#ifdef VBOX_WITH_HGCM
1033 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1034 if (pSession->aHGCMClientIds[i])
1035 {
1036 VBoxGuestHGCMDisconnectInfo Info;
1037 Info.result = 0;
1038 Info.u32ClientID = pSession->aHGCMClientIds[i];
1039 pSession->aHGCMClientIds[i] = 0;
1040 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1041 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1042 }
1043#endif
1044
1045 pSession->pDevExt = NULL;
1046 pSession->Process = NIL_RTPROCESS;
1047 pSession->R0Process = NIL_RTR0PROCESS;
1048 vboxGuestCloseMemBalloon(pDevExt, pSession);
1049 /* Reset any mouse status flags which the session may have set. */
1050 VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
1051 RTMemFree(pSession);
1052}
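/* A minimal sketch of the session lifecycle as driven by the native code
 * (kept out of the build with "#if 0"): one user session per open file
 * handle, closed again on the final release. */
#if 0
static int vboxGuestExampleOpenClose(PVBOXGUESTDEVEXT pDevExt)
{
    PVBOXGUESTSESSION pSession;
    int rc = VBoxGuestCreateUserSession(pDevExt, &pSession);   /* on open() */
    if (RT_SUCCESS(rc))
    {
        /* ... I/O control requests are dispatched with this session ... */
        VBoxGuestCloseSession(pDevExt, pSession);               /* on close() */
    }
    return rc;
}
#endif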
1053
1054
1055/**
1056 * Allocates a wait-for-event entry.
1057 *
1058 * @returns The wait-for-event entry.
1059 * @param pDevExt The device extension.
1060 * @param pSession The session that's allocating this. Can be NULL.
1061 */
1062static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1063{
1064 /*
1065 * Allocate it one way or the other.
1066 */
1067 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1068 if (pWait)
1069 {
1070 RTSpinlockAcquire(pDevExt->EventSpinlock);
1071
1072 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1073 if (pWait)
1074 RTListNodeRemove(&pWait->ListNode);
1075
1076 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1077 }
1078 if (!pWait)
1079 {
1080 static unsigned s_cErrors = 0;
1081 int rc;
1082
1083 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1084 if (!pWait)
1085 {
1086 if (s_cErrors++ < 32)
1087 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1088 return NULL;
1089 }
1090
1091 rc = RTSemEventMultiCreate(&pWait->Event);
1092 if (RT_FAILURE(rc))
1093 {
1094 if (s_cErrors++ < 32)
1095 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1096 RTMemFree(pWait);
1097 return NULL;
1098 }
1099
1100 pWait->ListNode.pNext = NULL;
1101 pWait->ListNode.pPrev = NULL;
1102 }
1103
1104 /*
1105     * Zero members just as a precaution.
1106 */
1107 pWait->fReqEvents = 0;
1108 pWait->fResEvents = 0;
1109#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1110 pWait->fPendingWakeUp = false;
1111 pWait->fFreeMe = false;
1112#endif
1113 pWait->pSession = pSession;
1114#ifdef VBOX_WITH_HGCM
1115 pWait->pHGCMReq = NULL;
1116#endif
1117 RTSemEventMultiReset(pWait->Event);
1118 return pWait;
1119}
1120
1121
1122/**
1123 * Frees the wait-for-event entry.
1124 *
1125 * The caller must own the wait spinlock!
1126 * The entry must be in a list!
1127 *
1128 * @param pDevExt The device extension.
1129 * @param pWait The wait-for-event entry to free.
1130 */
1131static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1132{
1133 pWait->fReqEvents = 0;
1134 pWait->fResEvents = 0;
1135#ifdef VBOX_WITH_HGCM
1136 pWait->pHGCMReq = NULL;
1137#endif
1138#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1139 Assert(!pWait->fFreeMe);
1140 if (pWait->fPendingWakeUp)
1141 pWait->fFreeMe = true;
1142 else
1143#endif
1144 {
1145 RTListNodeRemove(&pWait->ListNode);
1146 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1147 }
1148}
1149
1150
1151/**
1152 * Frees the wait-for-event entry.
1153 *
1154 * @param pDevExt The device extension.
1155 * @param pWait The wait-for-event entry to free.
1156 */
1157static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1158{
1159 RTSpinlockAcquire(pDevExt->EventSpinlock);
1160 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1161 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1162}
1163
1164
1165#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1166/**
1167 * Processes the wake-up list.
1168 *
1169 * All entries in the wake-up list get signalled and moved to the woken-up
1170 * list.
1171 *
1172 * @param pDevExt The device extension.
1173 */
1174void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1175{
1176 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1177 {
1178 RTSpinlockAcquire(pDevExt->EventSpinlock);
1179 for (;;)
1180 {
1181 int rc;
1182 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1183 if (!pWait)
1184 break;
1185 pWait->fPendingWakeUp = true;
1186 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1187
1188 rc = RTSemEventMultiSignal(pWait->Event);
1189 AssertRC(rc);
1190
1191 RTSpinlockAcquire(pDevExt->EventSpinlock);
1192 pWait->fPendingWakeUp = false;
1193 if (!pWait->fFreeMe)
1194 {
1195 RTListNodeRemove(&pWait->ListNode);
1196 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1197 }
1198 else
1199 {
1200 pWait->fFreeMe = false;
1201 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1202 }
1203 }
1204 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1205 }
1206}
1207#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
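/* A minimal sketch of the deferred wake-up pattern (kept out of the build
 * with "#if 0"): code running under the event spinlock only moves waiters
 * onto WakeUpList, and the actual semaphore signalling is done later from a
 * context where that is safe. */
#if 0
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
static void vboxGuestExampleWakeWaiter(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait, uint32_t fEvents)
{
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    pWait->fResEvents = fEvents;
    RTListNodeRemove(&pWait->ListNode);
    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);   /* no signalling here */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    VBoxGuestWaitDoWakeUps(pDevExt);                         /* signal outside the lock */
}
# endif
#endif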
1208
1209
1210/**
1211 * Modifies the guest capabilities.
1212 *
1213 * Should be called during driver init and termination.
1214 *
1215 * @returns VBox status code.
1216 * @param fOr The Or mask (what to enable).
1217 * @param fNot The Not mask (what to disable).
1218 */
1219int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1220{
1221 VMMDevReqGuestCapabilities2 *pReq;
1222 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1223 if (RT_FAILURE(rc))
1224 {
1225 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1226 sizeof(*pReq), sizeof(*pReq), rc));
1227 return rc;
1228 }
1229
1230 pReq->u32OrMask = fOr;
1231 pReq->u32NotMask = fNot;
1232
1233 rc = VbglGRPerform(&pReq->header);
1234 if (RT_FAILURE(rc))
1235 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1236
1237 VbglGRFree(&pReq->header);
1238 return rc;
1239}
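/* A minimal usage sketch for VBoxGuestSetGuestCapabilities (kept out of the
 * build with "#if 0"): a guest graphics driver would re-enable the graphics
 * capability that VBoxGuestInitDevExt cleared by default. */
#if 0
static int vboxGuestExampleEnableGraphicsCap(void)
{
    /* The Or-mask enables capabilities, the Not-mask disables them. */
    return VBoxGuestSetGuestCapabilities(VMMDEV_GUEST_SUPPORTS_GRAPHICS, 0 /* fNot */);
}
#endif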
1240
1241
1242/**
1243 * Implements the fast (no input or output) type of IOCtls.
1244 *
1245 * This is currently just a placeholder stub inherited from the support driver code.
1246 *
1247 * @returns VBox status code.
1248 * @param iFunction The IOCtl function number.
1249 * @param pDevExt The device extension.
1250 * @param pSession The session.
1251 */
1252int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1253{
1254 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1255
1256 NOREF(iFunction);
1257 NOREF(pDevExt);
1258 NOREF(pSession);
1259 return VERR_NOT_SUPPORTED;
1260}
1261
1262
1263/**
1264 * Return the VMM device port.
1265 *
1266 * @returns IPRT status code.
1267 * @param pDevExt The device extension.
1268 * @param pInfo The request info.
1269 * @param pcbDataReturned (out) contains the number of bytes to return.
1270 */
1271static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1272{
1273 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1274 pInfo->portAddress = pDevExt->IOPortBase;
1275 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1276 if (pcbDataReturned)
1277 *pcbDataReturned = sizeof(*pInfo);
1278 return VINF_SUCCESS;
1279}
1280
1281
1282#ifndef RT_OS_WINDOWS
1283/**
1284 * Set the callback for the kernel mouse handler.
1285 *
1286 * @returns IPRT status code.
1287 * @param pDevExt The device extension.
1288 * @param pNotify The new callback information.
1289 * @note This function takes the session spinlock to update the callback
1290 * information, but the interrupt handler will not do this. To make
1291 * sure that the interrupt handler sees a consistent structure, we
1292 * set the function pointer to NULL before updating the data and only
1293 * set it to the correct value once the data is updated. Since the
1294 * interrupt handler executes atomically this ensures that the data is
1295 * valid if the function pointer is non-NULL.
1296 */
1297int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1298{
1299 Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1300
1301 RTSpinlockAcquire(pDevExt->EventSpinlock);
1302 pDevExt->MouseNotifyCallback = *pNotify;
1303 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1304
1305 /* Make sure no active ISR is referencing the old data - hacky but should be
1306 * effective. */
1307 while (pDevExt->cISR > 0)
1308 ASMNopPause();
1309
1310 return VINF_SUCCESS;
1311}
1312#endif
1313
1314
1315/**
1316 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
1317 *
1318 * The caller enters the spinlock, we leave it.
1319 *
1320 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1321 */
1322DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestWaitEventInfo *pInfo,
1323 int iEvent, const uint32_t fReqEvents)
1324{
1325 uint32_t fMatches = VBoxGuestCommonGetAndCleanPendingEventsLocked(pDevExt, pSession, fReqEvents);
1326 if (fMatches)
1327 {
1328 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1329
1330 pInfo->u32EventFlagsOut = fMatches;
1331 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1332 if (fReqEvents & ~((uint32_t)1 << iEvent))
1333 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1334 else
1335 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1336 return VINF_SUCCESS;
1337 }
1338 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1339 return VERR_TIMEOUT;
1340}
1341
1342
1343static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1344 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
1345{
1346 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
1347 uint32_t fResEvents;
1348 int iEvent;
1349 PVBOXGUESTWAIT pWait;
1350 int rc;
1351
1352 pInfo->u32EventFlagsOut = 0;
1353 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1354 if (pcbDataReturned)
1355 *pcbDataReturned = sizeof(*pInfo);
1356
1357 /*
1358 * Copy and verify the input mask.
1359 */
1360 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
1361 if (RT_UNLIKELY(iEvent < 0))
1362 {
1363 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
1364 return VERR_INVALID_PARAMETER;
1365 }
1366
1367 /*
1368 * Check the condition up front, before doing the wait-for-event allocations.
1369 */
1370 RTSpinlockAcquire(pDevExt->EventSpinlock);
1371 rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
1372 if (rc == VINF_SUCCESS)
1373 return rc;
1374
1375 if (!pInfo->u32TimeoutIn)
1376 {
1377 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1378 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
1379 return VERR_TIMEOUT;
1380 }
1381
1382 pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
1383 if (!pWait)
1384 return VERR_NO_MEMORY;
1385 pWait->fReqEvents = fReqEvents;
1386
1387 /*
1388 * We've got the wait entry now, re-enter the spinlock and check for the condition.
1389 * If the wait condition is met, return.
1390 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
1391 */
1392 RTSpinlockAcquire(pDevExt->EventSpinlock);
1393 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
1394 rc = WaitEventCheckCondition(pDevExt, pSession, pInfo, iEvent, fReqEvents);
1395 if (rc == VINF_SUCCESS)
1396 {
1397 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1398 return rc;
1399 }
1400
1401 if (fInterruptible)
1402 rc = RTSemEventMultiWaitNoResume(pWait->Event,
1403 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1404 else
1405 rc = RTSemEventMultiWait(pWait->Event,
1406 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1407
1408 /*
1409 * There is one special case here and that's when the semaphore is
1410 * destroyed upon device driver unload. This shouldn't happen of course,
1411 * but in case it does, just get out of here ASAP.
1412 */
1413 if (rc == VERR_SEM_DESTROYED)
1414 return rc;
1415
1416 /*
1417 * Unlink the wait item and dispose of it.
1418 */
1419 RTSpinlockAcquire(pDevExt->EventSpinlock);
1420 fResEvents = pWait->fResEvents;
1421 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1422 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1423
1424 /*
1425 * Now deal with the return code.
1426 */
1427 if ( fResEvents
1428 && fResEvents != UINT32_MAX)
1429 {
1430 pInfo->u32EventFlagsOut = fResEvents;
1431 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1432 if (fReqEvents & ~((uint32_t)1 << iEvent))
1433 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1434 else
1435 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1436 rc = VINF_SUCCESS;
1437 }
1438 else if ( fResEvents == UINT32_MAX
1439 || rc == VERR_INTERRUPTED)
1440 {
1441 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
1442 rc = VERR_INTERRUPTED;
1443 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
1444 }
1445 else if (rc == VERR_TIMEOUT)
1446 {
1447 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1448 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
1449 }
1450 else
1451 {
1452 if (RT_SUCCESS(rc))
1453 {
1454 static unsigned s_cErrors = 0;
1455 if (s_cErrors++ < 32)
1456 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
1457 rc = VERR_INTERNAL_ERROR;
1458 }
1459 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1460 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
1461 }
1462
1463 return rc;
1464}
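/* A minimal ring-3 usage sketch for the WAITEVENT path above (kept out of the
 * build with "#if 0"; the exact I/O control wrapper is OS specific and
 * VBOXGUEST_IOCTL_WAITEVENT is assumed to come from VBoxGuest.h). */
#if 0
static void vboxGuestExampleWaitForMouseEvent(void)
{
    VBoxGuestWaitEventInfo Info;
    Info.u32TimeoutIn     = 5000;  /* ms; UINT32_MAX waits forever */
    Info.u32EventMaskIn   = VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
    Info.u32EventFlagsOut = 0;
    Info.u32Result        = VBOXGUEST_WAITEVENT_ERROR;
    /* ioctl(hDevice, VBOXGUEST_IOCTL_WAITEVENT, &Info); then check that
       Info.u32Result == VBOXGUEST_WAITEVENT_OK and look at Info.u32EventFlagsOut. */
}
#endif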
1465
1466
1467static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1468{
1469 PVBOXGUESTWAIT pWait;
1470 PVBOXGUESTWAIT pSafe;
1471 int rc = 0;
1472
1473 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1474
1475 /*
1476 * Walk the event list and wake up anyone with a matching session.
1477 */
1478 RTSpinlockAcquire(pDevExt->EventSpinlock);
1479 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
1480 {
1481 if (pWait->pSession == pSession)
1482 {
1483 pWait->fResEvents = UINT32_MAX;
1484 RTListNodeRemove(&pWait->ListNode);
1485#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1486 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
1487#else
1488 rc |= RTSemEventMultiSignal(pWait->Event);
1489 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1490#endif
1491 }
1492 }
1493 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1494 Assert(rc == 0);
1495
1496#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1497 VBoxGuestWaitDoWakeUps(pDevExt);
1498#endif
1499
1500 return VINF_SUCCESS;
1501}
1502
1503/**
1504 * Checks if the VMM request is allowed in the context of the given session.
1505 *
1506 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1507 * @param pSession The calling session.
1508 * @param enmType The request type.
1509 * @param pReqHdr The request.
1510 */
1511static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1512 VMMDevRequestHeader const *pReqHdr)
1513{
1514 /*
1515 * Categorize the request being made.
1516 */
1517    /** @todo This needs quite some more work! */
1518 enum
1519 {
1520 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1521 } enmRequired;
1522 switch (enmType)
1523 {
1524 /*
1525 * Deny access to anything we don't know or provide specialized I/O controls for.
1526 */
1527#ifdef VBOX_WITH_HGCM
1528 case VMMDevReq_HGCMConnect:
1529 case VMMDevReq_HGCMDisconnect:
1530# ifdef VBOX_WITH_64_BITS_GUESTS
1531 case VMMDevReq_HGCMCall32:
1532 case VMMDevReq_HGCMCall64:
1533# else
1534 case VMMDevReq_HGCMCall:
1535# endif /* VBOX_WITH_64_BITS_GUESTS */
1536 case VMMDevReq_HGCMCancel:
1537 case VMMDevReq_HGCMCancel2:
1538#endif /* VBOX_WITH_HGCM */
1539 default:
1540 enmRequired = kLevel_NoOne;
1541 break;
1542
1543 /*
1544 * There are a few things only this driver can do (and it doesn't use
1545         * the VMMRequest I/O control route anyway, but whatever).
1546 */
1547 case VMMDevReq_ReportGuestInfo:
1548 case VMMDevReq_ReportGuestInfo2:
1549 case VMMDevReq_GetHypervisorInfo:
1550 case VMMDevReq_SetHypervisorInfo:
1551 case VMMDevReq_RegisterPatchMemory:
1552 case VMMDevReq_DeregisterPatchMemory:
1553 case VMMDevReq_GetMemBalloonChangeRequest:
1554 enmRequired = kLevel_OnlyVBoxGuest;
1555 break;
1556
1557 /*
1558 * Trusted users apps only.
1559 */
1560 case VMMDevReq_QueryCredentials:
1561 case VMMDevReq_ReportCredentialsJudgement:
1562 case VMMDevReq_RegisterSharedModule:
1563 case VMMDevReq_UnregisterSharedModule:
1564 case VMMDevReq_WriteCoreDump:
1565 case VMMDevReq_GetCpuHotPlugRequest:
1566 case VMMDevReq_SetCpuHotPlugStatus:
1567 case VMMDevReq_CheckSharedModules:
1568 case VMMDevReq_GetPageSharingStatus:
1569 case VMMDevReq_DebugIsPageShared:
1570 case VMMDevReq_ReportGuestStats:
1571 case VMMDevReq_GetStatisticsChangeRequest:
1572 case VMMDevReq_ChangeMemBalloon:
1573 enmRequired = kLevel_TrustedUsers;
1574 break;
1575
1576 /*
1577 * Anyone.
1578 */
1579 case VMMDevReq_GetMouseStatus:
1580 case VMMDevReq_SetMouseStatus:
1581 case VMMDevReq_SetPointerShape:
1582 case VMMDevReq_GetHostVersion:
1583 case VMMDevReq_Idle:
1584 case VMMDevReq_GetHostTime:
1585 case VMMDevReq_SetPowerStatus:
1586 case VMMDevReq_AcknowledgeEvents:
1587 case VMMDevReq_CtlGuestFilterMask:
1588 case VMMDevReq_ReportGuestStatus:
1589 case VMMDevReq_GetDisplayChangeRequest:
1590 case VMMDevReq_VideoModeSupported:
1591 case VMMDevReq_GetHeightReduction:
1592 case VMMDevReq_GetDisplayChangeRequest2:
1593 case VMMDevReq_SetGuestCapabilities:
1594 case VMMDevReq_VideoModeSupported2:
1595 case VMMDevReq_VideoAccelEnable:
1596 case VMMDevReq_VideoAccelFlush:
1597 case VMMDevReq_VideoSetVisibleRegion:
1598 case VMMDevReq_GetDisplayChangeRequestEx:
1599 case VMMDevReq_GetSeamlessChangeRequest:
1600 case VMMDevReq_GetVRDPChangeRequest:
1601 case VMMDevReq_LogString:
1602 case VMMDevReq_GetSessionId:
1603 enmRequired = kLevel_AllUsers;
1604 break;
1605
1606 /*
1607 * Depends on the request parameters...
1608 */
1609        /** @todo this has to be changed into an I/O control and the facilities
1610 * tracked in the session so they can automatically be failed when the
1611 * session terminates without reporting the new status.
1612 *
1613 * The information presented by IGuest is not reliable without this! */
1614 case VMMDevReq_ReportGuestCapabilities:
1615 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1616 {
1617 case VBoxGuestFacilityType_All:
1618 case VBoxGuestFacilityType_VBoxGuestDriver:
1619 enmRequired = kLevel_OnlyVBoxGuest;
1620 break;
1621 case VBoxGuestFacilityType_VBoxService:
1622 enmRequired = kLevel_TrustedUsers;
1623 break;
1624 case VBoxGuestFacilityType_VBoxTrayClient:
1625 case VBoxGuestFacilityType_Seamless:
1626 case VBoxGuestFacilityType_Graphics:
1627 default:
1628 enmRequired = kLevel_AllUsers;
1629 break;
1630 }
1631 break;
1632 }
1633
1634 /*
1635 * Check against the session.
1636 */
1637 switch (enmRequired)
1638 {
1639 default:
1640 case kLevel_NoOne:
1641 break;
1642 case kLevel_OnlyVBoxGuest:
1643 case kLevel_OnlyKernel:
1644 if (pSession->R0Process == NIL_RTR0PROCESS)
1645 return VINF_SUCCESS;
1646 break;
1647 case kLevel_TrustedUsers:
1648 case kLevel_AllUsers:
1649 return VINF_SUCCESS;
1650 }
1651
1652 return VERR_PERMISSION_DENIED;
1653}
1654
1655static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1656 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
1657{
1658 int rc;
1659 VMMDevRequestHeader *pReqCopy;
1660
1661 /*
1662 * Validate the header and request size.
1663 */
1664 const VMMDevRequestType enmType = pReqHdr->requestType;
1665 const uint32_t cbReq = pReqHdr->size;
1666 const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
1667
1668 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
1669
1670 if (cbReq < cbMinSize)
1671 {
1672 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
1673 cbReq, cbMinSize, enmType));
1674 return VERR_INVALID_PARAMETER;
1675 }
1676 if (cbReq > cbData)
1677 {
1678 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1679 cbData, cbReq, enmType));
1680 return VERR_INVALID_PARAMETER;
1681 }
1682 rc = VbglGRVerify(pReqHdr, cbData);
1683 if (RT_FAILURE(rc))
1684 {
1685 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
1686 cbData, cbReq, enmType, rc));
1687 return rc;
1688 }
1689
1690 rc = VBoxGuestCheckIfVMMReqAllowed(pSession, enmType, pReqHdr);
1691 if (RT_FAILURE(rc))
1692 {
1693 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
1694 return rc;
1695 }
1696
1697 /*
1698 * Make a copy of the request in the physical memory heap so
1699 * the VBoxGuestLibrary can more easily deal with the request.
1700 * (This is really a waste of time since the OS or the OS specific
1701 * code has already buffered or locked the input/output buffer, but
1702     * it does make things a bit simpler wrt the phys address.)
1703 */
1704 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1705 if (RT_FAILURE(rc))
1706 {
1707 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1708 cbReq, cbReq, rc));
1709 return rc;
1710 }
1711 memcpy(pReqCopy, pReqHdr, cbReq);
1712
1713 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1714 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1715
1716 rc = VbglGRPerform(pReqCopy);
1717 if ( RT_SUCCESS(rc)
1718 && RT_SUCCESS(pReqCopy->rc))
1719 {
1720 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1721 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1722
1723 memcpy(pReqHdr, pReqCopy, cbReq);
1724 if (pcbDataReturned)
1725 *pcbDataReturned = cbReq;
1726 }
1727 else if (RT_FAILURE(rc))
1728 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1729 else
1730 {
1731 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1732 rc = pReqCopy->rc;
1733 }
1734
1735 VbglGRFree(pReqCopy);
1736 return rc;
1737}
1738
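/*
 * Illustrative sketch only (not part of the driver build, hence the #if 0):
 * one way an OS specific part of the driver could push a VMMDev request
 * through the common worker above.  The VMMDevReqMouseStatus layout and the
 * VMMDEV_REQUEST_HEADER_VERSION constant are assumed to match VMMDev.h;
 * everything else is taken from this file.
 */
#if 0
static int vbgExampleQueryMouseFeatures(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t *pfFeatures)
{
    VMMDevReqMouseStatus Req;
    size_t               cbReturned = 0;
    int                  rc;

    /* Initialize the request header the same way a ring-3 caller would. */
    Req.header.size        = sizeof(Req);
    Req.header.version     = VMMDEV_REQUEST_HEADER_VERSION;
    Req.header.requestType = VMMDevReq_GetMouseStatus;
    Req.header.rc          = VERR_GENERAL_FAILURE;
    Req.header.reserved1   = 0;
    Req.header.reserved2   = 0;
    Req.mouseFeatures      = 0;
    Req.pointerXPos        = 0;
    Req.pointerYPos        = 0;

    rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, &Req.header, sizeof(Req), &cbReturned);
    if (RT_SUCCESS(rc))
        *pfFeatures = Req.mouseFeatures;
    return rc;
}
#endif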
1739
1740static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1741{
1742 VMMDevCtlGuestFilterMask *pReq;
1743 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1744 if (RT_FAILURE(rc))
1745 {
1746 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1747 sizeof(*pReq), sizeof(*pReq), rc));
1748 return rc;
1749 }
1750
1751 pReq->u32OrMask = pInfo->u32OrMask;
1752 pReq->u32NotMask = pInfo->u32NotMask;
1753 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1754 rc = VbglGRPerform(&pReq->header);
1755 if (RT_FAILURE(rc))
1756 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1757
1758 VbglGRFree(&pReq->header);
1759 return rc;
1760}
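
/*
 * Illustrative sketch only (not part of the driver build): enabling one extra
 * host event for the guest by adding it to the filter mask, then removing it
 * again.  Uses the VBoxGuestFilterMaskInfo fields handled above; the helper
 * name is made up for the example.
 */
#if 0
static int vbgExampleToggleMouseEventFilter(PVBOXGUESTDEVEXT pDevExt, bool fEnable)
{
    VBoxGuestFilterMaskInfo Info;

    if (fEnable)
    {
        Info.u32OrMask  = VMMDEV_EVENT_MOUSE_POSITION_CHANGED; /* start receiving this event */
        Info.u32NotMask = 0;
    }
    else
    {
        Info.u32OrMask  = 0;
        Info.u32NotMask = VMMDEV_EVENT_MOUSE_POSITION_CHANGED; /* stop receiving it (fFixedEvents still wins) */
    }
    return VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, &Info);
}
#endif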
1761
1762#ifdef VBOX_WITH_HGCM
1763
1764AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1765
1766/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
1767static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
1768 bool fInterruptible, uint32_t cMillies)
1769{
1770 int rc;
1771
1772 /*
1773 * Check to see if the condition was met by the time we got here.
1774 *
1775 * We create a simple poll loop here for dealing with out-of-memory
1776 * conditions since the caller isn't necessarily able to deal with
1777 * us returning too early.
1778 */
1779 PVBOXGUESTWAIT pWait;
1780 for (;;)
1781 {
1782 RTSpinlockAcquire(pDevExt->EventSpinlock);
1783 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1784 {
1785 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1786 return VINF_SUCCESS;
1787 }
1788 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1789
1790 pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
1791 if (pWait)
1792 break;
1793 if (fInterruptible)
1794 return VERR_INTERRUPTED;
1795 RTThreadSleep(1);
1796 }
1797 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
1798 pWait->pHGCMReq = pHdr;
1799
1800 /*
1801 * Re-enter the spinlock and re-check for the condition.
1802 * If the condition is met, return.
1803 * Otherwise link us into the HGCM wait list and go to sleep.
1804 */
1805 RTSpinlockAcquire(pDevExt->EventSpinlock);
1806 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
1807 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1808 {
1809 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1810 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1811 return VINF_SUCCESS;
1812 }
1813 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1814
1815 if (fInterruptible)
1816 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
1817 else
1818 rc = RTSemEventMultiWait(pWait->Event, cMillies);
1819 if (rc == VERR_SEM_DESTROYED)
1820 return rc;
1821
1822 /*
1823 * Unlink, free and return.
1824 */
1825 if ( RT_FAILURE(rc)
1826 && rc != VERR_TIMEOUT
1827 && ( !fInterruptible
1828 || rc != VERR_INTERRUPTED))
1829 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
1830
1831 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1832 return rc;
1833}
1834
1835
1836/**
1837 * This is a callback for dealing with async waits.
1838 *
1839 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1840 */
1841static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1842{
1843 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1844 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1845 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1846 pDevExt,
1847 false /* fInterruptible */,
1848 u32User /* cMillies */);
1849}
1850
1851
1852/**
1853 * This is a callback for dealing with async waits with a timeout.
1854 *
1855 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1856 */
1857static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1858 void *pvUser, uint32_t u32User)
1859{
1860 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1861 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1862 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1863 pDevExt,
1864 true /* fInterruptible */,
1865 u32User /* cMillies */ );
1866
1867}
1868
1869
1870static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1871 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
1872{
1873 int rc;
1874
1875 /*
1876 * The VbglHGCMConnect call will invoke the callback if the HGCM
1877 * call is performed in an ASYNC fashion. The function is not able
1878 * to deal with cancelled requests.
1879 */
1880 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
1881 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
1882 ? pInfo->Loc.u.host.achName : "<not local host>"));
1883
1884 rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1885 if (RT_SUCCESS(rc))
1886 {
1887 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
1888 pInfo->u32ClientID, pInfo->result, rc));
1889 if (RT_SUCCESS(pInfo->result))
1890 {
1891 /*
1892 * Append the client id to the client id table.
1893 * If the table has somehow become filled up, we'll disconnect the session.
1894 */
1895 unsigned i;
1896 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1897 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1898 if (!pSession->aHGCMClientIds[i])
1899 {
1900 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1901 break;
1902 }
1903 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1904 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1905 {
1906 static unsigned s_cErrors = 0;
1907 VBoxGuestHGCMDisconnectInfo Info;
1908
1909 if (s_cErrors++ < 32)
1910 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1911
1912 Info.result = 0;
1913 Info.u32ClientID = pInfo->u32ClientID;
1914 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1915 return VERR_TOO_MANY_OPEN_FILES;
1916 }
1917 }
1918 if (pcbDataReturned)
1919 *pcbDataReturned = sizeof(*pInfo);
1920 }
1921 return rc;
1922}
1923
1924
1925static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1926 size_t *pcbDataReturned)
1927{
1928 /*
1929 * Validate the client id and invalidate its entry while we're in the call.
1930 */
1931 int rc;
1932 const uint32_t u32ClientId = pInfo->u32ClientID;
1933 unsigned i;
1934 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1935 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1936 if (pSession->aHGCMClientIds[i] == u32ClientId)
1937 {
1938 pSession->aHGCMClientIds[i] = UINT32_MAX;
1939 break;
1940 }
1941 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1942 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1943 {
1944 static unsigned s_cErrors = 0;
1945        if (s_cErrors++ < 32)
1946 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1947 return VERR_INVALID_HANDLE;
1948 }
1949
1950 /*
1951     * The VbglHGCMDisconnect call will invoke the callback if the HGCM
1952 * call is performed in an ASYNC fashion. The function is not able
1953 * to deal with cancelled requests.
1954 */
1955 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1956 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1957 if (RT_SUCCESS(rc))
1958 {
1959 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1960 if (pcbDataReturned)
1961 *pcbDataReturned = sizeof(*pInfo);
1962 }
1963
1964 /* Update the client id array according to the result. */
1965 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1966 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1967 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1968 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1969
1970 return rc;
1971}
1972
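/*
 * Illustrative sketch only (not part of the driver build): connecting a
 * session to a local HGCM service and disconnecting again, using the two
 * workers above.  The service name is just an example.
 */
#if 0
static int vbgExampleHGCMConnectDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    VBoxGuestHGCMConnectInfo    ConnInfo;
    VBoxGuestHGCMDisconnectInfo DiscInfo;
    int rc;

    RT_ZERO(ConnInfo);
    ConnInfo.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
    RTStrCopy(ConnInfo.Loc.u.host.achName, sizeof(ConnInfo.Loc.u.host.achName), "VBoxSharedFolders" /* example */);

    rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, &ConnInfo, NULL /*pcbDataReturned*/);
    if (RT_SUCCESS(rc) && RT_SUCCESS(ConnInfo.result))
    {
        /* ... issue HGCM calls using ConnInfo.u32ClientID ... */

        DiscInfo.u32ClientID = ConnInfo.u32ClientID;
        DiscInfo.result      = 0;
        rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, &DiscInfo, NULL /*pcbDataReturned*/);
    }
    return rc;
}
#endif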
1973
1974static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1975 PVBOXGUESTSESSION pSession,
1976 VBoxGuestHGCMCallInfo *pInfo,
1977 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
1978 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1979{
1980 const uint32_t u32ClientId = pInfo->u32ClientID;
1981 uint32_t fFlags;
1982 size_t cbActual;
1983 unsigned i;
1984 int rc;
1985
1986 /*
1987 * Some more validations.
1988 */
1989 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1990 {
1991 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1992 return VERR_INVALID_PARAMETER;
1993 }
1994
1995 cbActual = cbExtra + sizeof(*pInfo);
1996#ifdef RT_ARCH_AMD64
1997 if (f32bit)
1998 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1999 else
2000#endif
2001 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
2002 if (cbData < cbActual)
2003 {
2004 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
2005                cbData, cbData, cbActual, cbActual));
2006 return VERR_INVALID_PARAMETER;
2007 }
2008
2009 /*
2010 * Validate the client id.
2011 */
2012 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2013 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
2014 if (pSession->aHGCMClientIds[i] == u32ClientId)
2015 break;
2016 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2017 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
2018 {
2019 static unsigned s_cErrors = 0;
2020        if (s_cErrors++ < 32)
2021 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
2022 return VERR_INVALID_HANDLE;
2023 }
2024
2025 /*
2026 * The VbglHGCMCall call will invoke the callback if the HGCM
2027 * call is performed in an ASYNC fashion. This function can
2028     * deal with cancelled requests, so we let user mode requests
2029 * be interruptible (should add a flag for this later I guess).
2030 */
2031 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2032 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2033#ifdef RT_ARCH_AMD64
2034 if (f32bit)
2035 {
2036 if (fInterruptible)
2037 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2038 else
2039 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2040 }
2041 else
2042#endif
2043 {
2044 if (fInterruptible)
2045 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2046 else
2047 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2048 }
2049 if (RT_SUCCESS(rc))
2050 {
2051 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2052 if (pcbDataReturned)
2053 *pcbDataReturned = cbActual;
2054 }
2055 else
2056 {
2057 if ( rc != VERR_INTERRUPTED
2058 && rc != VERR_TIMEOUT)
2059 {
2060 static unsigned s_cErrors = 0;
2061 if (s_cErrors++ < 32)
2062 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2063 }
2064 else
2065 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2066 }
2067 return rc;
2068}
2069
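/*
 * Illustrative sketch only (not part of the driver build): computing the
 * buffer size the HGCM_CALL validation above expects, i.e. the call info
 * header followed by cParms parameter blocks, and allocating a scratch
 * buffer for it.  The u32Function member is assumed from VBoxGuest.h.
 */
#if 0
static VBoxGuestHGCMCallInfo *vbgExampleAllocHGCMCallBuf(uint32_t u32ClientId, uint32_t u32Function,
                                                         uint32_t cParms, size_t *pcbBuf)
{
    size_t cbBuf = sizeof(VBoxGuestHGCMCallInfo) + cParms * sizeof(HGCMFunctionParameter);
    VBoxGuestHGCMCallInfo *pInfo = (VBoxGuestHGCMCallInfo *)RTMemTmpAllocZ(cbBuf);
    if (pInfo)
    {
        pInfo->u32ClientID = u32ClientId;
        pInfo->u32Function = u32Function;
        pInfo->cParms      = cParms;
        *pcbBuf = cbBuf;
        /* The HGCMFunctionParameter array following pInfo is filled in by the caller. */
    }
    return pInfo;
}
#endif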
2070
2071#endif /* VBOX_WITH_HGCM */
2072
2073/**
2074 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2075 *
2076 * Ask the host for the size of the balloon and try to set it accordingly. If
2077 * this approach fails because it's not supported, return with fHandleInR3 set
2078 * and let the user land supply memory we can lock via the other ioctl.
2079 *
2080 * @returns VBox status code.
2081 *
2082 * @param pDevExt The device extension.
2083 * @param pSession The session.
2084 * @param pInfo The output buffer.
2085 * @param pcbDataReturned Where to store the amount of returned data. Can
2086 * be NULL.
2087 */
2088static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2089 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2090{
2091 VMMDevGetMemBalloonChangeRequest *pReq;
2092 int rc;
2093
2094 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2095 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2096 AssertRCReturn(rc, rc);
2097
2098 /*
2099 * The first user trying to query/change the balloon becomes the
2100 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2101 */
2102 if ( pDevExt->MemBalloon.pOwner != pSession
2103 && pDevExt->MemBalloon.pOwner == NULL)
2104 pDevExt->MemBalloon.pOwner = pSession;
2105
2106 if (pDevExt->MemBalloon.pOwner == pSession)
2107 {
2108 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2109 if (RT_SUCCESS(rc))
2110 {
2111 /*
2112             * This is a response to the VMMDEV_EVENT_BALLOON_CHANGE_REQUEST event. Setting this bit means that
2113 * we request the value from the host and change the guest memory
2114 * balloon according to this value.
2115 */
2116 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2117 rc = VbglGRPerform(&pReq->header);
2118 if (RT_SUCCESS(rc))
2119 {
2120 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2121 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2122
2123 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2124 pInfo->fHandleInR3 = false;
2125
2126 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2127 /* Ignore various out of memory failures. */
2128 if ( rc == VERR_NO_MEMORY
2129 || rc == VERR_NO_PHYS_MEMORY
2130 || rc == VERR_NO_CONT_MEMORY)
2131 rc = VINF_SUCCESS;
2132
2133 if (pcbDataReturned)
2134 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2135 }
2136 else
2137 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2138 VbglGRFree(&pReq->header);
2139 }
2140 }
2141 else
2142 rc = VERR_PERMISSION_DENIED;
2143
2144 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2145 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2146 return rc;
2147}
2148
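/*
 * Illustrative sketch only (not part of the driver build): the balloon
 * handshake as seen from a caller of the common IOCtl, following the doc
 * comment above.  If the kernel cannot lock the memory itself, fHandleInR3
 * is set and user land feeds chunks in through VBOXGUEST_IOCTL_CHANGE_BALLOON.
 */
#if 0
static int vbgExampleCheckBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    VBoxGuestCheckBalloonInfo Info;
    size_t cbReturned = 0;
    int rc;

    RT_ZERO(Info);
    rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_CHECK_BALLOON, pDevExt, pSession,
                              &Info, sizeof(Info), &cbReturned);
    if (RT_SUCCESS(rc) && Info.fHandleInR3)
    {
        /*
         * The kernel could not adjust the balloon itself: a ring-3 helper would
         * now allocate/free Info.cBalloonChunks chunks and pass each chunk
         * address in a VBoxGuestChangeBalloonInfo (u64ChunkAddr, fInflate)
         * via VBOXGUEST_IOCTL_CHANGE_BALLOON.
         */
    }
    return rc;
}
#endif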
2149
2150/**
2151 * Handle a request for changing the memory balloon.
2152 *
2153 * @returns VBox status code.
2154 *
2155 * @param pDevExt The device extension.
2156 * @param pSession The session.
2157 * @param pInfo The change request structure (input).
2158 * @param pcbDataReturned Where to store the amount of returned data. Can
2159 * be NULL.
2160 */
2161static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2162 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2163{
2164 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2165 AssertRCReturn(rc, rc);
2166
2167 if (!pDevExt->MemBalloon.fUseKernelAPI)
2168 {
2169 /*
2170 * The first user trying to query/change the balloon becomes the
2171 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2172 */
2173 if ( pDevExt->MemBalloon.pOwner != pSession
2174 && pDevExt->MemBalloon.pOwner == NULL)
2175 pDevExt->MemBalloon.pOwner = pSession;
2176
2177 if (pDevExt->MemBalloon.pOwner == pSession)
2178 {
2179 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2180 if (pcbDataReturned)
2181 *pcbDataReturned = 0;
2182 }
2183 else
2184 rc = VERR_PERMISSION_DENIED;
2185 }
2186 else
2187 rc = VERR_PERMISSION_DENIED;
2188
2189 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2190 return rc;
2191}
2192
2193
2194/**
2195 * Handle a request for writing a core dump of the guest on the host.
2196 *
2197 * @returns VBox status code.
2198 *
2199 * @param pDevExt The device extension.
2200 * @param pInfo The output buffer.
2201 */
2202static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2203{
2204 VMMDevReqWriteCoreDump *pReq = NULL;
2205 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2206 if (RT_FAILURE(rc))
2207 {
2208 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2209 sizeof(*pReq), sizeof(*pReq), rc));
2210 return rc;
2211 }
2212
2213 pReq->fFlags = pInfo->fFlags;
2214 rc = VbglGRPerform(&pReq->header);
2215 if (RT_FAILURE(rc))
2216 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2217
2218 VbglGRFree(&pReq->header);
2219 return rc;
2220}
2221
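/*
 * Illustrative sketch only (not part of the driver build): requesting a guest
 * core dump on the host through the worker above; fFlags is left at zero
 * since no flag values are defined in this file.
 */
#if 0
static int vbgExampleWriteCoreDump(PVBOXGUESTDEVEXT pDevExt)
{
    VBoxGuestWriteCoreDump Info;
    Info.fFlags = 0;
    return VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, &Info);
}
#endif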
2222
2223#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2224/**
2225 * Enables the VRDP session and saves its session ID.
2226 *
2227 * @returns VBox status code.
2228 *
2229 * @param pDevExt The device extension.
2230 * @param pSession The session.
2231 */
2232static int VBoxGuestCommonIOCtl_EnableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2233{
2234 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2235 return VERR_NOT_IMPLEMENTED;
2236}
2237
2238
2239/**
2240 * Disables the VRDP session.
2241 *
2242 * @returns VBox status code.
2243 *
2244 * @param pDevExt The device extension.
2245 * @param pSession The session.
2246 */
2247static int VBoxGuestCommonIOCtl_DisableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2248{
2249 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2250 return VERR_NOT_IMPLEMENTED;
2251}
2252#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2253
2254#ifdef DEBUG
2255/** Unit test SetMouseStatus instead of really executing the request. */
2256static bool g_test_fSetMouseStatus = false;
2257/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
2258static int g_test_SetMouseStatusGRRC;
2259/** When unit testing SetMouseStatus this will be set to the status passed to
2260 * the GR. */
2261static uint32_t g_test_statusSetMouseStatus;
2262#endif
2263
2264static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
2265{
2266 VMMDevReqMouseStatus *pReq;
2267 int rc;
2268
2269 LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
2270 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2271 if (RT_SUCCESS(rc))
2272 {
2273 pReq->mouseFeatures = fFeatures;
2274 pReq->pointerXPos = 0;
2275 pReq->pointerYPos = 0;
2276#ifdef DEBUG
2277 if (g_test_fSetMouseStatus)
2278 {
2279 g_test_statusSetMouseStatus = pReq->mouseFeatures;
2280 rc = g_test_SetMouseStatusGRRC;
2281 }
2282 else
2283#endif
2284 rc = VbglGRPerform(&pReq->header);
2285 VbglGRFree(&pReq->header);
2286 }
2287 LogRelFlowFunc(("rc=%Rrc\n", rc));
2288 return rc;
2289}
2290
2291
2292/**
2293 * Sets the mouse status features for this session and updates them
2294 * globally. We aim to ensure that if several threads call this in
2295 * parallel the most recent status will always end up being set.
2296 *
2297 * @returns VBox status code.
2298 *
2299 * @param pDevExt The device extension.
2300 * @param pSession The session.
2301 * @param fFeatures New bitmap of enabled features.
2302 */
2303static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2304{
2305 uint32_t fNewDevExtStatus = 0;
2306 unsigned i;
2307 int rc;
2308 /* Exit early if nothing has changed - hack to work around the
2309 * Windows Additions not using the common code. */
2310 bool fNoAction;
2311
2312 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2313
2314 /* For all the bits which the guest is allowed to set, check whether the
2315 * requested value is different to the current one and adjust the global
2316 * usage counter and if appropriate the global state if so. */
2317 for (i = 0; i < sizeof(fFeatures) * 8; i++)
2318 {
2319 if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
2320 {
2321 if ( (RT_BIT_32(i) & fFeatures)
2322 && !(RT_BIT_32(i) & pSession->fMouseStatus))
2323 pDevExt->acMouseFeatureUsage[i]++;
2324 else if ( !(RT_BIT_32(i) & fFeatures)
2325 && (RT_BIT_32(i) & pSession->fMouseStatus))
2326 pDevExt->acMouseFeatureUsage[i]--;
2327 }
2328 if (pDevExt->acMouseFeatureUsage[i] > 0)
2329 fNewDevExtStatus |= RT_BIT_32(i);
2330 }
2331
2332 pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
2333 fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
2334 pDevExt->fMouseStatus = fNewDevExtStatus;
2335
2336 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2337 if (fNoAction)
2338 return VINF_SUCCESS;
2339
2340 do
2341 {
2342 fNewDevExtStatus = pDevExt->fMouseStatus;
2343 rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
2344 } while ( RT_SUCCESS(rc)
2345 && fNewDevExtStatus != pDevExt->fMouseStatus);
2346
2347 return rc;
2348}
2349
2350
2351#ifdef DEBUG
2352/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2353 * the code in question it probably makes most sense to keep it next to the
2354 * code. */
2355static void testSetMouseStatus(void)
2356{
2357 uint32_t u32Data;
2358 int rc;
2359 RTSPINLOCK Spinlock;
2360
2361 g_test_fSetMouseStatus = true;
2362 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2363 AssertRCReturnVoid(rc);
2364 {
2365 VBOXGUESTDEVEXT DevExt = { 0 };
2366 VBOXGUESTSESSION Session = { 0 };
2367
2368 g_test_statusSetMouseStatus = ~0;
2369 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2370 DevExt.SessionSpinlock = Spinlock;
2371 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2372 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2373 &Session, &u32Data, sizeof(u32Data), NULL);
2374 AssertRCSuccess(rc);
2375 AssertMsg( g_test_statusSetMouseStatus
2376 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2377 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2378 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2379 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2380 &Session, &u32Data, sizeof(u32Data), NULL);
2381 AssertRCSuccess(rc);
2382 AssertMsg( g_test_statusSetMouseStatus
2383 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2384 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2385 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2386 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2387 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2388 &Session, &u32Data, sizeof(u32Data), NULL);
2389 AssertRCSuccess(rc);
2390 AssertMsg( g_test_statusSetMouseStatus
2391 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2392 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2393 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2394 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2395 &Session, &u32Data, sizeof(u32Data), NULL);
2396 AssertRCSuccess(rc);
2397 AssertMsg( g_test_statusSetMouseStatus
2398 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2399 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2400 u32Data = 0;
2401 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2402 &Session, &u32Data, sizeof(u32Data), NULL);
2403 AssertRCSuccess(rc);
2404 AssertMsg( g_test_statusSetMouseStatus
2405 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2406 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2407 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2408 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2409 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2410 /* This should succeed as the host request should not be made
2411 * since nothing has changed. */
2412 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2413 &Session, &u32Data, sizeof(u32Data), NULL);
2414 AssertRCSuccess(rc);
2415 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2416 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2417 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2418 &Session, &u32Data, sizeof(u32Data), NULL);
2419 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2420 /* Untested paths: out of memory; race setting status to host */
2421 }
2422 RTSpinlockDestroy(Spinlock);
2423 g_test_fSetMouseStatus = false;
2424}
2425#endif
2426
2427
2428/**
2429 * Guest backdoor logging.
2430 *
2431 * @returns VBox status code.
2432 *
2433 * @param pDevExt The device extension.
2434 * @param pch The log message (need not be NULL terminated).
2435 * @param cbData Size of the buffer.
2436 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2437 */
2438static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2439{
2440 NOREF(pch);
2441 NOREF(cbData);
2442 if (pDevExt->fLoggingEnabled)
2443 RTLogBackdoorPrintf("%.*s", cbData, pch);
2444 else
2445 Log(("%.*s", cbData, pch));
2446 if (pcbDataReturned)
2447 *pcbDataReturned = 0;
2448 return VINF_SUCCESS;
2449}
2450
2451static bool VBoxGuestCommonGuestCapsValidateValues(uint32_t fCaps)
2452{
2453 if (fCaps & (~(VMMDEV_GUEST_SUPPORTS_SEAMLESS | VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING | VMMDEV_GUEST_SUPPORTS_GRAPHICS)))
2454 {
2455 LogRel(("VBoxGuestCommonGuestCapsValidateValues: invalid guest caps 0x%x\n", fCaps));
2456 return false;
2457 }
2458 return true;
2459}
2460
2461static int VBoxGuestCommonGuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fOrMask, uint32_t fNotMask)
2462{
2463 if (!VBoxGuestCommonGuestCapsValidateValues(fOrMask))
2464 return VERR_INVALID_PARAMETER;
2465
2466    /* fNotMask does not need to contain only valid values;
2467     * invalid bits will simply be ignored */
2468 uint32_t fCurrentOwnedCaps;
2469 uint32_t fSessionNotCaps;
2470 uint32_t fSessionOrCaps;
2471 uint32_t fOtherConflictingCaps;
2472
2473 fNotMask &= ~fOrMask;
2474
2475 RTSpinlockAcquire(pDevExt->EventSpinlock);
2476
2477 pDevExt->u32GuestCapsAcquireMode = 1;
2478 fCurrentOwnedCaps = pSession->u32AquiredGuestCaps;
2479 fSessionNotCaps = fCurrentOwnedCaps & fNotMask;
2480 fSessionOrCaps = fOrMask & ~fCurrentOwnedCaps;
2481 fOtherConflictingCaps = pDevExt->u32GuestCaps & ~fCurrentOwnedCaps;
2482 fOtherConflictingCaps &= fSessionOrCaps;
2483
2484 if (!fOtherConflictingCaps)
2485 {
2486 if (fSessionOrCaps)
2487 {
2488 pSession->u32AquiredGuestCaps |= fSessionOrCaps;
2489 pDevExt->u32GuestCaps |= fSessionOrCaps;
2490 }
2491
2492 if (fSessionNotCaps)
2493 {
2494 pSession->u32AquiredGuestCaps &= ~fSessionNotCaps;
2495 pDevExt->u32GuestCaps &= ~fSessionNotCaps;
2496 }
2497 }
2498
2499 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2500
2501 if (fOtherConflictingCaps)
2502 {
2503 Log(("VBoxGuest: Caps 0x%x were busy\n", fOtherConflictingCaps));
2504 return VERR_RESOURCE_BUSY;
2505 }
2506
2507 /* now do host notification outside the lock */
2508 if (!fSessionOrCaps && !fSessionNotCaps)
2509 {
2510 /* no changes, return */
2511 return VINF_SUCCESS;
2512 }
2513
2514 int rc = VBoxGuestSetGuestCapabilities(fSessionOrCaps, fSessionNotCaps);
2515 if (RT_SUCCESS(rc))
2516 {
2517 /* success! return */
2518 return VINF_SUCCESS;
2519 }
2520
2521    /* Failure branch:
2522     * this is generally bad, since e.g. failing to release the caps may leave other sessions unable to use them,
2523     * yet we do not try to restore the caps to their values from before the VBoxGuestCommonGuestCapsAcquire call,
2524     * but just pretend everything is OK.
2525     * @todo: better failure handling mechanism? */
2526 return VINF_SUCCESS;
2527}
2528
2529static int VBoxGuestCommonIOCTL_GuestCapsAcquire(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestCapsAquire *pAcquire)
2530{
2531 int rc = VBoxGuestCommonGuestCapsAcquire(pDevExt, pSession, pAcquire->u32OrMask, pAcquire->u32NotMask);
2532    if (RT_FAILURE(rc))
2533        LogRel(("VBoxGuestCommonGuestCapsAcquire: failed, rc=%Rrc\n", rc));
2534 pAcquire->rc = rc;
2535 return VINF_SUCCESS;
2536}
2537
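/*
 * Illustrative sketch only (not part of the driver build): acquire-mode
 * capability handling as implemented above.  A session acquires the seamless
 * capability via the OR mask; a second session asking for the same bit gets
 * VERR_RESOURCE_BUSY in the rc field until the first one releases it via the
 * NOT mask.
 */
#if 0
static void vbgExampleAcquireSeamlessCap(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    VBoxGuestCapsAquire Acquire;

    /* Acquire: the OR mask selects the capability, the NOT mask is empty. */
    Acquire.u32OrMask  = VMMDEV_GUEST_SUPPORTS_SEAMLESS;
    Acquire.u32NotMask = 0;
    VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, &Acquire);
    if (Acquire.rc == VERR_RESOURCE_BUSY)
    {
        /* Another session already owns the capability. */
    }

    /* ... later, release it again. */
    Acquire.u32OrMask  = 0;
    Acquire.u32NotMask = VMMDEV_GUEST_SUPPORTS_SEAMLESS;
    VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, &Acquire);
}
#endif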
2538
2539/**
2540 * Common IOCtl for user to kernel and kernel to kernel communication.
2541 *
2542 * This function only does the basic validation and then invokes
2543 * worker functions that take care of each specific function.
2544 *
2545 * @returns VBox status code.
2546 *
2547 * @param iFunction The requested function.
2548 * @param pDevExt The device extension.
2549 * @param pSession The client session.
2550 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2551 * @param cbData The max size of the data buffer.
2552 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2553 */
2554int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2555 void *pvData, size_t cbData, size_t *pcbDataReturned)
2556{
2557 int rc;
2558 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2559 iFunction, pDevExt, pSession, pvData, cbData));
2560
2561 /*
2562 * Make sure the returned data size is set to zero.
2563 */
2564 if (pcbDataReturned)
2565 *pcbDataReturned = 0;
2566
2567 /*
2568 * Define some helper macros to simplify validation.
2569 */
2570#define CHECKRET_RING0(mnemonic) \
2571 do { \
2572 if (pSession->R0Process != NIL_RTR0PROCESS) \
2573 { \
2574 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2575 pSession->Process, (uintptr_t)pSession->R0Process)); \
2576 return VERR_PERMISSION_DENIED; \
2577 } \
2578 } while (0)
2579#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2580 do { \
2581 if (cbData < (cbMin)) \
2582 { \
2583 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2584 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2585 return VERR_BUFFER_OVERFLOW; \
2586 } \
2587 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2588 { \
2589 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2590 return VERR_INVALID_POINTER; \
2591 } \
2592 } while (0)
2593#define CHECKRET_SIZE(mnemonic, cb) \
2594 do { \
2595 if (cbData != (cb)) \
2596 { \
2597 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2598 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2599 return VERR_BUFFER_OVERFLOW; \
2600 } \
2601 if ((cb) != 0 && !VALID_PTR(pvData)) \
2602 { \
2603 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2604 return VERR_INVALID_POINTER; \
2605 } \
2606 } while (0)
2607
2608
2609 /*
2610 * Deal with variably sized requests first.
2611 */
2612 rc = VINF_SUCCESS;
2613 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2614 {
2615 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2616 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2617 }
2618#ifdef VBOX_WITH_HGCM
2619 /*
2620 * These ones are a bit tricky.
2621 */
2622 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2623 {
2624 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2625 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2626 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2627 fInterruptible, false /*f32bit*/, false /* fUserData */,
2628 0, cbData, pcbDataReturned);
2629 }
2630 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2631 {
2632 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2633 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2634 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2635 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2636 false /*f32bit*/, false /* fUserData */,
2637 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2638 }
2639 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2640 {
2641 bool fInterruptible = true;
2642 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2643 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2644 fInterruptible, false /*f32bit*/, true /* fUserData */,
2645 0, cbData, pcbDataReturned);
2646 }
2647# ifdef RT_ARCH_AMD64
2648 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2649 {
2650 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2651 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2652 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2653 fInterruptible, true /*f32bit*/, false /* fUserData */,
2654 0, cbData, pcbDataReturned);
2655 }
2656 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2657 {
2658 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2659 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2660 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2661 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2662 true /*f32bit*/, false /* fUserData */,
2663 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2664 }
2665# endif
2666#endif /* VBOX_WITH_HGCM */
2667 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2668 {
2669 CHECKRET_MIN_SIZE("LOG", 1);
2670 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2671 }
2672 else
2673 {
2674 switch (iFunction)
2675 {
2676 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2677 CHECKRET_RING0("GETVMMDEVPORT");
2678 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2679 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2680 break;
2681
2682#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2683 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2684 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2685 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2686 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2687 break;
2688#endif
2689
2690 case VBOXGUEST_IOCTL_WAITEVENT:
2691 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2692 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2693 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2694 break;
2695
2696 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2697                if (cbData != 0)
2698                    rc = VERR_INVALID_PARAMETER;
2699                else rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2700 break;
2701
2702 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2703 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2704 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2705 break;
2706
2707#ifdef VBOX_WITH_HGCM
2708 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2709# ifdef RT_ARCH_AMD64
2710 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2711# endif
2712 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2713 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2714 break;
2715
2716 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2717# ifdef RT_ARCH_AMD64
2718 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2719# endif
2720 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2721 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2722 break;
2723#endif /* VBOX_WITH_HGCM */
2724
2725 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2726 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2727 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2728 break;
2729
2730 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2731 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2732 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2733 break;
2734
2735 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2736 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2737 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2738 break;
2739
2740#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2741 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2742 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2743 break;
2744
2745 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2746 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2747 break;
2748#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2749 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2750 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2751 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2752 *(uint32_t *)pvData);
2753 break;
2754
2755#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
2756 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
2757 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
2758 rc = VbgdNtIOCtl_DpcLatencyChecker();
2759 break;
2760#endif
2761
2762 case VBOXGUEST_IOCTL_GUEST_CAPS_ACQUIRE:
2763 CHECKRET_SIZE("GUEST_CAPS_ACQUIRE", sizeof(VBoxGuestCapsAquire));
2764 rc = VBoxGuestCommonIOCTL_GuestCapsAcquire(pDevExt, pSession, (VBoxGuestCapsAquire*)pvData);
2765                if (pcbDataReturned) *pcbDataReturned = sizeof(VBoxGuestCapsAquire);
2766 break;
2767
2768 default:
2769 {
2770 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2771 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2772 rc = VERR_NOT_SUPPORTED;
2773 break;
2774 }
2775 }
2776 }
2777
2778 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2779 return rc;
2780}
2781
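/*
 * Illustrative sketch only (not part of the driver build): how an OS specific
 * ioctl handler funnels a request into the common dispatcher above, here
 * using the variable sized LOG function as an example.
 */
#if 0
static int vbgExampleLogThroughCommonIOCtl(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, const char *pszMsg)
{
    size_t cchMsg = strlen(pszMsg);
    return VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_LOG(cchMsg), pDevExt, pSession,
                                (void *)pszMsg, cchMsg, NULL /*pcbDataReturned*/);
}
#endif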
2782
2783
2784/**
2785 * Common interrupt service routine.
2786 *
2787 * This deals with events and with waking up thread waiting for those events.
2788 *
2789 * @returns true if it was our interrupt, false if it wasn't.
2790 * @param pDevExt The VBoxGuest device extension.
2791 */
2792bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2793{
2794#ifndef RT_OS_WINDOWS
2795 VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
2796#endif
2797 bool fMousePositionChanged = false;
2798 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2799 int rc = 0;
2800 bool fOurIrq;
2801
2802 /*
2803 * Make sure we've initialized the device extension.
2804 */
2805 if (RT_UNLIKELY(!pReq))
2806 return false;
2807
2808 /*
2809 * Enter the spinlock, increase the ISR count and check if it's our IRQ or
2810 * not.
2811 */
2812 RTSpinlockAcquire(pDevExt->EventSpinlock);
2813 ASMAtomicIncU32(&pDevExt->cISR);
2814 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2815 if (fOurIrq)
2816 {
2817 /*
2818         * Acknowledge events.
2819         * We don't use VbglGRPerform here as it may take other spinlocks.
2820 */
2821 pReq->header.rc = VERR_INTERNAL_ERROR;
2822 pReq->events = 0;
2823 ASMCompilerBarrier();
2824 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2825 ASMCompilerBarrier(); /* paranoia */
2826 if (RT_SUCCESS(pReq->header.rc))
2827 {
2828 uint32_t fEvents = pReq->events;
2829 PVBOXGUESTWAIT pWait;
2830 PVBOXGUESTWAIT pSafe;
2831
2832 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2833
2834 /*
2835 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2836 */
2837 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2838 {
2839#ifndef RT_OS_WINDOWS
2840 MouseNotifyCallback = pDevExt->MouseNotifyCallback;
2841#endif
2842 fMousePositionChanged = true;
2843 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2844 }
2845
2846#ifdef VBOX_WITH_HGCM
2847 /*
2848 * The HGCM event/list is kind of different in that we evaluate all entries.
2849 */
2850 if (fEvents & VMMDEV_EVENT_HGCM)
2851 {
2852 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2853 {
2854 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2855 {
2856 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2857 RTListNodeRemove(&pWait->ListNode);
2858# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2859 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2860# else
2861 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2862 rc |= RTSemEventMultiSignal(pWait->Event);
2863# endif
2864 }
2865 }
2866 fEvents &= ~VMMDEV_EVENT_HGCM;
2867 }
2868#endif
2869
2870 /*
2871 * Normal FIFO waiter evaluation.
2872 */
2873 fEvents |= pDevExt->f32PendingEvents;
2874 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2875 {
2876 uint32_t fHandledEvents = VBoxGuestCommonGetHandledEventsLocked(pDevExt, pWait->pSession);
2877 if ( (pWait->fReqEvents & fEvents & fHandledEvents)
2878 && !pWait->fResEvents)
2879 {
2880 pWait->fResEvents = pWait->fReqEvents & fEvents & fHandledEvents;
2881 fEvents &= ~pWait->fResEvents;
2882 RTListNodeRemove(&pWait->ListNode);
2883#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2884 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2885#else
2886 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2887 rc |= RTSemEventMultiSignal(pWait->Event);
2888#endif
2889 if (!fEvents)
2890 break;
2891 }
2892 }
2893 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2894 }
2895        else /* something is seriously wrong... */
2896 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2897 pReq->header.rc, pReq->events));
2898 }
2899 else
2900 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2901
2902 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2903
2904#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2905 /*
2906 * Do wake-ups.
2907 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2908 * care of it.
2909 */
2910 VBoxGuestWaitDoWakeUps(pDevExt);
2911#endif
2912
2913 /*
2914     * Work the poll and async notification queues on OSes that implement them.
2915 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2916 */
2917 if (fMousePositionChanged)
2918 {
2919 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2920 VBoxGuestNativeISRMousePollEvent(pDevExt);
2921#ifndef RT_OS_WINDOWS
2922 if (MouseNotifyCallback.pfnNotify)
2923 MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
2924#endif
2925 }
2926
2927 ASMAtomicDecU32(&pDevExt->cISR);
2928 Assert(rc == 0);
2929 return fOurIrq;
2930}
2931