VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 45221

Last change on this file since 45221 was 44992, checked in by vboxsync, 12 years ago

VBOX_WITH_DPC_LATENCY_CHECKER: Some adjustments. Please, don't use #pragma pack() unless you really need and mean it! Misaligning data just makes things slow...

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 99.4 KB
1/* $Id: VBoxGuest.cpp 44992 2013-03-11 15:41:01Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68#ifdef DEBUG
69static void testSetMouseStatus(void);
70#endif
71static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
72
73
74/*******************************************************************************
75* Global Variables *
76*******************************************************************************/
77static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
78
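/*
 * A compile-time sketch of what the size above amounts to: the fixed request
 * fields followed by one physical page address per page in a balloon chunk.
 * RT_SIZEOFMEMB is the usual IPRT helper for the element size; the assertion
 * is purely illustrative and holds by the definition of array member offsets.
 */
AssertCompile(   RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES])
              ==   RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[0])
                 + VMMDEV_MEMORY_BALLOON_CHUNK_PAGES * RT_SIZEOFMEMB(VMMDevChangeMemBalloon, aPhysPage[0]));
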
79#if defined(RT_OS_SOLARIS)
80/**
81 * Drag in the rest of IPRT since we share it with the
82 * rest of the kernel modules on Solaris.
83 */
84PFNRT g_apfnVBoxGuestIPRTDeps[] =
85{
86 /* VirtioNet */
87 (PFNRT)RTRandBytes,
88 /* RTSemMutex* */
89 (PFNRT)RTSemMutexCreate,
90 (PFNRT)RTSemMutexDestroy,
91 (PFNRT)RTSemMutexRequest,
92 (PFNRT)RTSemMutexRequestNoResume,
93 (PFNRT)RTSemMutexRequestDebug,
94 (PFNRT)RTSemMutexRequestNoResumeDebug,
95 (PFNRT)RTSemMutexRelease,
96 (PFNRT)RTSemMutexIsOwned,
97 NULL
98};
99#endif /* RT_OS_SOLARIS */
100
101
102/**
103 * Reserves memory in which the VMM can relocate any guest mappings
104 * that are floating around.
105 *
106 * This operation is a little bit tricky since the VMM might not accept
107 * just any address because of address clashes between the three contexts
108 * it operates in, so we use a small stack of attempts to perform this operation.
109 *
110 * @returns VBox status code (ignored).
111 * @param pDevExt The device extension.
112 */
113static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
114{
115 /*
116 * Query the required space.
117 */
118 VMMDevReqHypervisorInfo *pReq;
119 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
120 if (RT_FAILURE(rc))
121 return rc;
122 pReq->hypervisorStart = 0;
123 pReq->hypervisorSize = 0;
124 rc = VbglGRPerform(&pReq->header);
125 if (RT_FAILURE(rc)) /* this shouldn't happen! */
126 {
127 VbglGRFree(&pReq->header);
128 return rc;
129 }
130
131 /*
132 * The VMM will report back if there is nothing it wants to map, like for
133 * instance in VT-x and AMD-V mode.
134 */
135 if (pReq->hypervisorSize == 0)
136 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
137 else
138 {
139 /*
140 * We have to try several times since the host can be picky
141 * about certain addresses.
142 */
143 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
144 uint32_t cbHypervisor = pReq->hypervisorSize;
145 RTR0MEMOBJ ahTries[5];
146 uint32_t iTry;
147 bool fBitched = false;
148 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
149 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
150 {
151 /*
152 * Reserve space, or if that isn't supported, create an object for
153 * some fictive physical memory and map that into kernel space.
154 *
155 * To make the code a bit uglier, most systems cannot help with
156 * 4MB alignment, so we have to deal with that in addition to
157 * having two ways of getting the memory.
158 */
159 uint32_t uAlignment = _4M;
160 RTR0MEMOBJ hObj;
161 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
162 if (rc == VERR_NOT_SUPPORTED)
163 {
164 uAlignment = PAGE_SIZE;
165 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
166 }
167 /*
168 * If both RTR0MemObjReserveKernel calls above failed because the operation is
169 * either not supported or not implemented on the current platform, try to map
170 * the memory object into the virtual kernel space instead.
171 */
172 if (rc == VERR_NOT_SUPPORTED)
173 {
174 if (hFictive == NIL_RTR0MEMOBJ)
175 {
176 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
177 if (RT_FAILURE(rc))
178 break;
179 hFictive = hObj;
180 }
181 uAlignment = _4M;
182 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
183 if (rc == VERR_NOT_SUPPORTED)
184 {
185 uAlignment = PAGE_SIZE;
186 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
187 }
188 }
189 if (RT_FAILURE(rc))
190 {
191 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
192 rc, cbHypervisor, uAlignment, iTry));
193 fBitched = true;
194 break;
195 }
196
197 /*
198 * Try to set it.
199 */
200 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
201 pReq->header.rc = VERR_INTERNAL_ERROR;
202 pReq->hypervisorSize = cbHypervisor;
203 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
204 if ( uAlignment == PAGE_SIZE
205 && pReq->hypervisorStart & (_4M - 1))
206 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
207 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
208
209 rc = VbglGRPerform(&pReq->header);
210 if (RT_SUCCESS(rc))
211 {
212 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
213 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
214 RTR0MemObjAddress(pDevExt->hGuestMappings),
215 RTR0MemObjSize(pDevExt->hGuestMappings),
216 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
217 break;
218 }
219 ahTries[iTry] = hObj;
220 }
221
222 /*
223 * Cleanup failed attempts.
224 */
225 while (iTry-- > 0)
226 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
227 if ( RT_FAILURE(rc)
228 && hFictive != NIL_RTR0PTR)
229 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
230 if (RT_FAILURE(rc) && !fBitched)
231 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
232 }
233 VbglGRFree(&pReq->header);
234
235 /*
236 * We ignore failed attempts for now.
237 */
238 return VINF_SUCCESS;
239}
240
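/*
 * A worked sketch (with made-up numbers) of the page-alignment fallback in the
 * function above: when only PAGE_SIZE alignment is available, an extra 4MB is
 * reserved so that rounding the start address up to the next 4MB boundary
 * still leaves cbHypervisor bytes inside the reservation.
 *
 *   reserved start:  0x12345000, size = RT_ALIGN_32(cbHypervisor, _4M) + _4M
 *   reported start:  RT_ALIGN_32(0x12345000, _4M) = 0x12400000
 *   slack consumed:  0x12400000 - 0x12345000 = 0xBB000, always less than _4M
 */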
241
242/**
243 * Undo what vboxGuestInitFixateGuestMappings did.
244 *
245 * @param pDevExt The device extension.
246 */
247static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
248{
249 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
250 {
251 /*
252 * Tell the host that we're going to free the memory we reserved for
253 * it, then free it up. (Leak the memory if anything goes wrong here.)
254 */
255 VMMDevReqHypervisorInfo *pReq;
256 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
257 if (RT_SUCCESS(rc))
258 {
259 pReq->hypervisorStart = 0;
260 pReq->hypervisorSize = 0;
261 rc = VbglGRPerform(&pReq->header);
262 VbglGRFree(&pReq->header);
263 }
264 if (RT_SUCCESS(rc))
265 {
266 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
267 AssertRC(rc);
268 }
269 else
270 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
271
272 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
273 }
274}
275
276
277/**
278 * Sets the interrupt filter mask during initialization and termination.
279 *
280 * This will ASSUME that we're the ones in charge of the mask, so
281 * we'll simply clear all bits we don't set.
282 *
283 * @returns VBox status code (ignored).
284 * @param pDevExt The device extension.
285 * @param fMask The new mask.
286 */
287static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
288{
289 VMMDevCtlGuestFilterMask *pReq;
290 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
291 if (RT_SUCCESS(rc))
292 {
293 pReq->u32OrMask = fMask;
294 pReq->u32NotMask = ~fMask;
295 rc = VbglGRPerform(&pReq->header);
296 if (RT_FAILURE(rc))
297 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
298 VbglGRFree(&pReq->header);
299 }
300 return rc;
301}
302
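/*
 * A minimal usage sketch, mirroring how the init and termination code later in
 * this file drives the helper above.  Because the Or and Not masks are both
 * derived from fMask, each call leaves the host with exactly fMask enabled:
 *
 *   vboxGuestSetFilterMask(pDevExt, fFixedEvents);    - init: only the fixed events
 *   vboxGuestSetFilterMask(pDevExt, 0);               - termination: filter everything
 */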
303
304/**
305 * Inflate the balloon by one chunk represented by an R0 memory object.
306 *
307 * The caller owns the balloon mutex.
308 *
309 * @returns IPRT status code.
310 * @param pMemObj Pointer to the R0 memory object.
311 * @param pReq The pre-allocated request for performing the VMMDev call.
312 */
313static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
314{
315 uint32_t iPage;
316 int rc;
317
318 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
319 {
320 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
321 pReq->aPhysPage[iPage] = phys;
322 }
323
324 pReq->fInflate = true;
325 pReq->header.size = cbChangeMemBalloonReq;
326 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
327
328 rc = VbglGRPerform(&pReq->header);
329 if (RT_FAILURE(rc))
330 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
331 return rc;
332}
333
334
335/**
336 * Deflate the balloon by one chunk - inform the host and free the memory object.
337 *
338 * The caller owns the balloon mutex.
339 *
340 * @returns IPRT status code.
341 * @param pMemObj Pointer to the R0 memory object.
342 * The memory object will be freed afterwards.
343 * @param pReq The pre-allocated request for performing the VMMDev call.
344 */
345static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
346{
347 uint32_t iPage;
348 int rc;
349
350 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
351 {
352 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
353 pReq->aPhysPage[iPage] = phys;
354 }
355
356 pReq->fInflate = false;
357 pReq->header.size = cbChangeMemBalloonReq;
358 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
359
360 rc = VbglGRPerform(&pReq->header);
361 if (RT_FAILURE(rc))
362 {
363 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
364 return rc;
365 }
366
367 rc = RTR0MemObjFree(*pMemObj, true);
368 if (RT_FAILURE(rc))
369 {
370 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
371 return rc;
372 }
373
374 *pMemObj = NIL_RTR0MEMOBJ;
375 return VINF_SUCCESS;
376}
377
378
379/**
380 * Inflate/deflate the memory balloon and notify the host.
381 *
382 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
383 * the mutex.
384 *
385 * @returns VBox status code.
386 * @param pDevExt The device extension.
388 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
389 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
390 * (VINF_SUCCESS if set).
391 */
392static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
393{
394 int rc = VINF_SUCCESS;
395
396 if (pDevExt->MemBalloon.fUseKernelAPI)
397 {
398 VMMDevChangeMemBalloon *pReq;
399 uint32_t i;
400
401 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
402 {
403 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
404 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
405 return VERR_INVALID_PARAMETER;
406 }
407
408 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
409 return VINF_SUCCESS; /* nothing to do */
410
411 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
412 && !pDevExt->MemBalloon.paMemObj)
413 {
414 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
415 if (!pDevExt->MemBalloon.paMemObj)
416 {
417 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
418 return VERR_NO_MEMORY;
419 }
420 }
421
422 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
423 if (RT_FAILURE(rc))
424 return rc;
425
426 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
427 {
428 /* inflate */
429 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
430 {
431 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
432 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
433 if (RT_FAILURE(rc))
434 {
435 if (rc == VERR_NOT_SUPPORTED)
436 {
437 /* not supported -- fall back to the R3-allocated memory. */
438 rc = VINF_SUCCESS;
439 pDevExt->MemBalloon.fUseKernelAPI = false;
440 Assert(pDevExt->MemBalloon.cChunks == 0);
441 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
442 }
443 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
444 * cannot allocate more memory => don't try further, just stop here */
445 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
446 break;
447 }
448
449 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
450 if (RT_FAILURE(rc))
451 {
452 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
453 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
454 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
455 break;
456 }
457 pDevExt->MemBalloon.cChunks++;
458 }
459 }
460 else
461 {
462 /* deflate */
463 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
464 {
465 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
466 if (RT_FAILURE(rc))
467 {
468 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
469 break;
470 }
471 pDevExt->MemBalloon.cChunks--;
472 }
473 }
474
475 VbglGRFree(&pReq->header);
476 }
477
478 /*
479 * Set the handle-in-ring3 indicator. When set, Ring-3 will have to perform
480 * the balloon changes via the other API.
481 */
482 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
483
484 return rc;
485}
486
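/**
 * A minimal sketch of a caller, along the lines of what the
 * VBoxGuestCommonIOCtl_CheckMemoryBalloon worker does later in the driver:
 * take the balloon mutex, resize, and hand the handle-in-ring3 indicator back.
 * The function name and the cChunksNew parameter are assumptions made for the
 * example; the real I/O control also deals with balloon ownership and the
 * host change request.
 */
static int vboxGuestExampleResizeBalloonKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cChunksNew, uint32_t *pfHandleInR3)
{
    int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    rc = vboxGuestSetBalloonSizeKernel(pDevExt, cChunksNew, pfHandleInR3);

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    return rc;
}
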
487
488/**
489 * Helper to reinit the VBoxVMM communication after hibernation.
490 *
491 * @returns VBox status code.
492 * @param pDevExt The device extension.
493 * @param enmOSType The OS type.
494 */
495int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
496{
497 int rc = VBoxGuestReportGuestInfo(enmOSType);
498 if (RT_SUCCESS(rc))
499 {
500 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
501 if (RT_FAILURE(rc))
502 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
503 }
504 else
505 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
506 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
507 return rc;
508}
509
510
511/**
512 * Inflate/deflate the balloon by one chunk.
513 *
514 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
515 *
516 * @returns VBox status code.
517 * @param pDevExt The device extension.
518 * @param pSession The session.
519 * @param u64ChunkAddr The address of the chunk to add to / remove from the
520 * balloon.
521 * @param fInflate Inflate if true, deflate if false.
522 */
523static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
524 uint64_t u64ChunkAddr, bool fInflate)
525{
526 VMMDevChangeMemBalloon *pReq;
527 int rc = VINF_SUCCESS;
528 uint32_t i;
529 PRTR0MEMOBJ pMemObj = NULL;
530
531 if (fInflate)
532 {
533 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
534 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
535 {
536 LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
537 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
538 return VERR_INVALID_PARAMETER;
539 }
540
541 if (!pDevExt->MemBalloon.paMemObj)
542 {
543 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
544 if (!pDevExt->MemBalloon.paMemObj)
545 {
546 LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
547 return VERR_NO_MEMORY;
548 }
549 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
550 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
551 }
552 }
553 else
554 {
555 if (pDevExt->MemBalloon.cChunks == 0)
556 {
557 AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
558 return VERR_INVALID_PARAMETER;
559 }
560 }
561
562 /*
563 * Enumerate all memory objects and check if the object is already registered.
564 */
565 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
566 {
567 if ( fInflate
568 && !pMemObj
569 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
570 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
571 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
572 {
573 if (fInflate)
574 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
575 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
576 break;
577 }
578 }
579 if (!pMemObj)
580 {
581 if (fInflate)
582 {
583 /* no free object pointer found -- should not happen */
584 return VERR_NO_MEMORY;
585 }
586
587 /* cannot free this memory as it wasn't provided before */
588 return VERR_NOT_FOUND;
589 }
590
591 /*
592 * Try to inflate / deflate the balloon as requested.
593 */
594 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
595 if (RT_FAILURE(rc))
596 return rc;
597
598 if (fInflate)
599 {
600 rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
601 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
602 if (RT_SUCCESS(rc))
603 {
604 rc = vboxGuestBalloonInflate(pMemObj, pReq);
605 if (RT_SUCCESS(rc))
606 pDevExt->MemBalloon.cChunks++;
607 else
608 {
609 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
610 RTR0MemObjFree(*pMemObj, true);
611 *pMemObj = NIL_RTR0MEMOBJ;
612 }
613 }
614 }
615 else
616 {
617 rc = vboxGuestBalloonDeflate(pMemObj, pReq);
618 if (RT_SUCCESS(rc))
619 pDevExt->MemBalloon.cChunks--;
620 else
621 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
622 }
623
624 VbglGRFree(&pReq->header);
625 return rc;
626}
627
628
629/**
630 * Cleanup the memory balloon of a session.
631 *
632 * Will request the balloon mutex, so it must be valid and the caller must not
633 * own it already.
634 *
635 * @param pDevExt The device extension.
636 * @param pSession The session. Can be NULL at unload.
637 */
638static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
639{
640 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
641 if ( pDevExt->MemBalloon.pOwner == pSession
642 || pSession == NULL /*unload*/)
643 {
644 if (pDevExt->MemBalloon.paMemObj)
645 {
646 VMMDevChangeMemBalloon *pReq;
647 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
648 if (RT_SUCCESS(rc))
649 {
650 uint32_t i;
651 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
652 {
653 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
654 if (RT_FAILURE(rc))
655 {
656 LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
657 rc, pDevExt->MemBalloon.cChunks));
658 break;
659 }
660 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
661 pDevExt->MemBalloon.cChunks--;
662 }
663 VbglGRFree(&pReq->header);
664 }
665 else
666 LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
667 rc, pDevExt->MemBalloon.cChunks));
668 RTMemFree(pDevExt->MemBalloon.paMemObj);
669 pDevExt->MemBalloon.paMemObj = NULL;
670 }
671
672 pDevExt->MemBalloon.pOwner = NULL;
673 }
674 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
675}
676
677
678/**
679 * Initializes the VBoxGuest device extension when the
680 * device driver is loaded.
681 *
682 * The native code locates the VMMDev on the PCI bus and retrieves
683 * the MMIO and I/O port ranges; this function will take care of
684 * mapping the MMIO memory (if present). Upon successful return
685 * the native code should set up the interrupt handler.
686 *
687 * @returns VBox status code.
688 *
689 * @param pDevExt The device extension. Allocated by the native code.
690 * @param IOPortBase The base of the I/O port range.
691 * @param pvMMIOBase The base of the MMIO memory mapping.
692 * This is optional, pass NULL if not present.
693 * @param cbMMIO The size of the MMIO memory mapping.
694 * This is optional, pass 0 if not present.
695 * @param enmOSType The guest OS type to report to the VMMDev.
696 * @param fFixedEvents Events that will be enabled upon init and no client
697 * will ever be allowed to mask.
698 */
699int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
700 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
701{
702 int rc, rc2;
703 unsigned i;
704
705 /*
706 * Adjust fFixedEvents.
707 */
708#ifdef VBOX_WITH_HGCM
709 fFixedEvents |= VMMDEV_EVENT_HGCM;
710#endif
711
712 /*
713 * Initialize the data.
714 */
715 pDevExt->IOPortBase = IOPortBase;
716 pDevExt->pVMMDevMemory = NULL;
717 pDevExt->fFixedEvents = fFixedEvents;
718 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
719 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
720 pDevExt->pIrqAckEvents = NULL;
721 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
722 RTListInit(&pDevExt->WaitList);
723#ifdef VBOX_WITH_HGCM
724 RTListInit(&pDevExt->HGCMWaitList);
725#endif
726#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
727 RTListInit(&pDevExt->WakeUpList);
728#endif
729 RTListInit(&pDevExt->WokenUpList);
730 RTListInit(&pDevExt->FreeList);
731#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
732 pDevExt->fVRDPEnabled = false;
733#endif
734 pDevExt->fLoggingEnabled = false;
735 pDevExt->f32PendingEvents = 0;
736 pDevExt->u32MousePosChangedSeq = 0;
737 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
738 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
739 pDevExt->MemBalloon.cChunks = 0;
740 pDevExt->MemBalloon.cMaxChunks = 0;
741 pDevExt->MemBalloon.fUseKernelAPI = true;
742 pDevExt->MemBalloon.paMemObj = NULL;
743 pDevExt->MemBalloon.pOwner = NULL;
744 for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
745 pDevExt->acMouseFeatureUsage[i] = 0;
746 pDevExt->fMouseStatus = 0;
747 pDevExt->MouseNotifyCallback.pfnNotify = NULL;
748 pDevExt->MouseNotifyCallback.pvUser = NULL;
749 pDevExt->cISR = 0;
750
751 /*
752 * If there is an MMIO region validate the version and size.
753 */
754 if (pvMMIOBase)
755 {
756 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
757 Assert(cbMMIO);
758 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
759 && pVMMDev->u32Size >= 32
760 && pVMMDev->u32Size <= cbMMIO)
761 {
762 pDevExt->pVMMDevMemory = pVMMDev;
763 Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
764 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
765 }
766 else /* try live without it. */
767 LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
768 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
769 }
770
771 /*
772 * Create the wait and session spinlocks as well as the ballooning mutex.
773 */
774 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
775 if (RT_SUCCESS(rc))
776 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
777 if (RT_FAILURE(rc))
778 {
779 LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
780 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
781 RTSpinlockDestroy(pDevExt->EventSpinlock);
782 return rc;
783 }
784
785 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
786 if (RT_FAILURE(rc))
787 {
788 LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
789 RTSpinlockDestroy(pDevExt->SessionSpinlock);
790 RTSpinlockDestroy(pDevExt->EventSpinlock);
791 return rc;
792 }
793
794 /*
795 * Initialize the guest library and report the guest info back to VMMDev,
796 * set the interrupt control filter mask, and fixate the guest mappings
797 * made by the VMM.
798 */
799 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
800 if (RT_SUCCESS(rc))
801 {
802 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
803 if (RT_SUCCESS(rc))
804 {
805 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
806 Assert(pDevExt->PhysIrqAckEvents != 0);
807
808 rc = VBoxGuestReportGuestInfo(enmOSType);
809 if (RT_SUCCESS(rc))
810 {
811 rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
812 if (RT_SUCCESS(rc))
813 {
814 /*
815 * Disable guest graphics capability by default. The guest specific
816 * graphics driver will re-enable this when it is necessary.
817 */
818 rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
819 if (RT_SUCCESS(rc))
820 {
821 vboxGuestInitFixateGuestMappings(pDevExt);
822
823#ifdef DEBUG
824 testSetMouseStatus(); /* Other tests? */
825#endif
826
827 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
828 if (RT_FAILURE(rc))
829 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
830
831 Log(("VBoxGuestInitDevExt: returns success\n"));
832 return VINF_SUCCESS;
833 }
834
835 LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
836 }
837 else
838 LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
839 }
840 else
841 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
842 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
843 }
844 else
845 LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));
846
847 VbglTerminate();
848 }
849 else
850 LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
851
852 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
853 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
854 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
855 return rc; /* (failed) */
856}
857
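/**
 * A minimal sketch of the native initialization sequence described in the
 * comments above, assuming the VMMDev PCI device has already been located.
 * The two native* helpers and the use of VBOXOSTYPE_Linux26 are assumptions
 * made purely for illustration; each OS-specific front end has its own way of
 * finding the resources and hooking the interrupt.
 */
int nativeQueryVMMDevResources(uint16_t *pIOPortBase, void **ppvMMIOBase, uint32_t *pcbMMIO); /* hypothetical */
int nativeInstallIrqHandler(PVBOXGUESTDEVEXT pDevExt);                                        /* hypothetical */

static int vboxGuestExampleNativeInit(PVBOXGUESTDEVEXT pDevExt)
{
    uint16_t IOPortBase = 0;
    void    *pvMMIOBase = NULL;
    uint32_t cbMMIO     = 0;
    int rc = nativeQueryVMMDevResources(&IOPortBase, &pvMMIOBase, &cbMMIO);
    if (RT_SUCCESS(rc))
    {
        rc = VBoxGuestInitDevExt(pDevExt, IOPortBase, pvMMIOBase, cbMMIO, VBOXOSTYPE_Linux26, 0 /* fFixedEvents */);
        if (RT_SUCCESS(rc))
        {
            rc = nativeInstallIrqHandler(pDevExt); /* the handler must forward into the common ISR code */
            if (RT_FAILURE(rc))
                VBoxGuestDeleteDevExt(pDevExt);
        }
    }
    return rc;
}
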
858
859/**
860 * Deletes all the items in a wait chain.
861 * @param pList The head of the chain.
862 */
863static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
864{
865 while (!RTListIsEmpty(pList))
866 {
867 int rc2;
868 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
869 RTListNodeRemove(&pWait->ListNode);
870
871 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
872 pWait->Event = NIL_RTSEMEVENTMULTI;
873 pWait->pSession = NULL;
874 RTMemFree(pWait);
875 }
876}
877
878
879/**
880 * Destroys the VBoxGuest device extension.
881 *
882 * The native code should call this before the driver is unloaded,
883 * but don't call this on shutdown.
884 *
885 * @param pDevExt The device extension.
886 */
887void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
888{
889 int rc2;
890 Log(("VBoxGuestDeleteDevExt:\n"));
891 Log(("VBoxGuest: The additions driver is terminating.\n"));
892
893 /*
894 * Clean up the bits that involves the host first.
895 */
896 vboxGuestTermUnfixGuestMappings(pDevExt);
897 VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
898 vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
899 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
900
901 /*
902 * Cleanup all the other resources.
903 */
904 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
905 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
906 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
907
908 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
909#ifdef VBOX_WITH_HGCM
910 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
911#endif
912#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
913 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
914#endif
915 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
916 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
917
918 VbglTerminate();
919
920 pDevExt->pVMMDevMemory = NULL;
921
922 pDevExt->IOPortBase = 0;
923 pDevExt->pIrqAckEvents = NULL;
924}
925
926
927/**
928 * Creates a VBoxGuest user session.
929 *
930 * The native code calls this when a ring-3 client opens the device.
931 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
932 *
933 * @returns VBox status code.
934 * @param pDevExt The device extension.
935 * @param ppSession Where to store the session on success.
936 */
937int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
938{
939 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
940 if (RT_UNLIKELY(!pSession))
941 {
942 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
943 return VERR_NO_MEMORY;
944 }
945
946 pSession->Process = RTProcSelf();
947 pSession->R0Process = RTR0ProcHandleSelf();
948 pSession->pDevExt = pDevExt;
949
950 *ppSession = pSession;
951 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
952 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
953 return VINF_SUCCESS;
954}
955
956
957/**
958 * Creates a VBoxGuest kernel session.
959 *
960 * The native code calls this when a ring-0 client connects to the device.
961 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
962 *
963 * @returns VBox status code.
964 * @param pDevExt The device extension.
965 * @param ppSession Where to store the session on success.
966 */
967int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
968{
969 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
970 if (RT_UNLIKELY(!pSession))
971 {
972 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
973 return VERR_NO_MEMORY;
974 }
975
976 pSession->Process = NIL_RTPROCESS;
977 pSession->R0Process = NIL_RTR0PROCESS;
978 pSession->pDevExt = pDevExt;
979
980 *ppSession = pSession;
981 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
982 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
983 return VINF_SUCCESS;
984}
985
986
987
988/**
989 * Closes a VBoxGuest session.
990 *
991 * @param pDevExt The device extension.
992 * @param pSession The session to close (and free).
993 */
994void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
995{
996 unsigned i; NOREF(i);
997 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
998 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
999
1000#ifdef VBOX_WITH_HGCM
1001 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1002 if (pSession->aHGCMClientIds[i])
1003 {
1004 VBoxGuestHGCMDisconnectInfo Info;
1005 Info.result = 0;
1006 Info.u32ClientID = pSession->aHGCMClientIds[i];
1007 pSession->aHGCMClientIds[i] = 0;
1008 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1009 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1010 }
1011#endif
1012
1013 pSession->pDevExt = NULL;
1014 pSession->Process = NIL_RTPROCESS;
1015 pSession->R0Process = NIL_RTR0PROCESS;
1016 vboxGuestCloseMemBalloon(pDevExt, pSession);
1017 /* Reset any mouse status flags which the session may have set. */
1018 VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
1019 RTMemFree(pSession);
1020}
1021
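/**
 * A minimal sketch of how a native ring-3 open/close pair uses the session
 * functions above.  The example function names and the pvFileHandle storage
 * are assumptions; each OS front end keeps the session pointer in whatever
 * per-handle context it has available.
 */
static int vboxGuestExampleNativeOpen(PVBOXGUESTDEVEXT pDevExt, void **ppvFileHandle)
{
    PVBOXGUESTSESSION pSession;
    int rc = VBoxGuestCreateUserSession(pDevExt, &pSession);
    if (RT_SUCCESS(rc))
        *ppvFileHandle = pSession; /* handed back to the ioctl and close entry points */
    return rc;
}

static void vboxGuestExampleNativeClose(PVBOXGUESTDEVEXT pDevExt, void *pvFileHandle)
{
    VBoxGuestCloseSession(pDevExt, (PVBOXGUESTSESSION)pvFileHandle);
}
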
1022
1023/**
1024 * Allocates a wait-for-event entry.
1025 *
1026 * @returns The wait-for-event entry.
1027 * @param pDevExt The device extension.
1028 * @param pSession The session that's allocating this. Can be NULL.
1029 */
1030static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1031{
1032 /*
1033 * Allocate it one way or the other.
1034 */
1035 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1036 if (pWait)
1037 {
1038 RTSpinlockAcquire(pDevExt->EventSpinlock);
1039
1040 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1041 if (pWait)
1042 RTListNodeRemove(&pWait->ListNode);
1043
1044 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1045 }
1046 if (!pWait)
1047 {
1048 static unsigned s_cErrors = 0;
1049 int rc;
1050
1051 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1052 if (!pWait)
1053 {
1054 if (s_cErrors++ < 32)
1055 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1056 return NULL;
1057 }
1058
1059 rc = RTSemEventMultiCreate(&pWait->Event);
1060 if (RT_FAILURE(rc))
1061 {
1062 if (s_cErrors++ < 32)
1063 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1064 RTMemFree(pWait);
1065 return NULL;
1066 }
1067
1068 pWait->ListNode.pNext = NULL;
1069 pWait->ListNode.pPrev = NULL;
1070 }
1071
1072 /*
1073 * Zero members just as a precaution.
1074 */
1075 pWait->fReqEvents = 0;
1076 pWait->fResEvents = 0;
1077#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1078 pWait->fPendingWakeUp = false;
1079 pWait->fFreeMe = false;
1080#endif
1081 pWait->pSession = pSession;
1082#ifdef VBOX_WITH_HGCM
1083 pWait->pHGCMReq = NULL;
1084#endif
1085 RTSemEventMultiReset(pWait->Event);
1086 return pWait;
1087}
1088
1089
1090/**
1091 * Frees the wait-for-event entry.
1092 *
1093 * The caller must own the wait spinlock!
1094 * The entry must be in a list!
1095 *
1096 * @param pDevExt The device extension.
1097 * @param pWait The wait-for-event entry to free.
1098 */
1099static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1100{
1101 pWait->fReqEvents = 0;
1102 pWait->fResEvents = 0;
1103#ifdef VBOX_WITH_HGCM
1104 pWait->pHGCMReq = NULL;
1105#endif
1106#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1107 Assert(!pWait->fFreeMe);
1108 if (pWait->fPendingWakeUp)
1109 pWait->fFreeMe = true;
1110 else
1111#endif
1112 {
1113 RTListNodeRemove(&pWait->ListNode);
1114 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1115 }
1116}
1117
1118
1119/**
1120 * Frees the wait-for-event entry.
1121 *
1122 * @param pDevExt The device extension.
1123 * @param pWait The wait-for-event entry to free.
1124 */
1125static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1126{
1127 RTSpinlockAcquire(pDevExt->EventSpinlock);
1128 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1129 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1130}
1131
1132
1133#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1134/**
1135 * Processes the wake-up list.
1136 *
1137 * All entries in the wake-up list get signalled and moved to the woken-up
1138 * list.
1139 *
1140 * @param pDevExt The device extension.
1141 */
1142void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1143{
1144 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1145 {
1146 RTSpinlockAcquire(pDevExt->EventSpinlock);
1147 for (;;)
1148 {
1149 int rc;
1150 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1151 if (!pWait)
1152 break;
1153 pWait->fPendingWakeUp = true;
1154 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1155
1156 rc = RTSemEventMultiSignal(pWait->Event);
1157 AssertRC(rc);
1158
1159 RTSpinlockAcquire(pDevExt->EventSpinlock);
1160 pWait->fPendingWakeUp = false;
1161 if (!pWait->fFreeMe)
1162 {
1163 RTListNodeRemove(&pWait->ListNode);
1164 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1165 }
1166 else
1167 {
1168 pWait->fFreeMe = false;
1169 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1170 }
1171 }
1172 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1173 }
1174}
1175#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1176
1177
1178/**
1179 * Modifies the guest capabilities.
1180 *
1181 * Should be called during driver init and termination.
1182 *
1183 * @returns VBox status code.
1184 * @param fOr The Or mask (what to enable).
1185 * @param fNot The Not mask (what to disable).
1186 */
1187int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1188{
1189 VMMDevReqGuestCapabilities2 *pReq;
1190 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1191 if (RT_FAILURE(rc))
1192 {
1193 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1194 sizeof(*pReq), sizeof(*pReq), rc));
1195 return rc;
1196 }
1197
1198 pReq->u32OrMask = fOr;
1199 pReq->u32NotMask = fNot;
1200
1201 rc = VbglGRPerform(&pReq->header);
1202 if (RT_FAILURE(rc))
1203 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1204
1205 VbglGRFree(&pReq->header);
1206 return rc;
1207}
1208
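/*
 * A minimal usage sketch, mirroring the init code earlier in this file: the
 * driver clears the graphics capability when it starts, and the guest
 * graphics driver is expected to turn it back on once it owns the display.
 *
 *   VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);   - init: graphics off
 *   VBoxGuestSetGuestCapabilities(VMMDEV_GUEST_SUPPORTS_GRAPHICS, 0);   - graphics driver: back on
 */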
1209
1210/**
1211 * Implements the fast (no input or output) type of IOCtls.
1212 *
1213 * This is currently just a placeholder stub inherited from the support driver code.
1214 *
1215 * @returns VBox status code.
1216 * @param iFunction The IOCtl function number.
1217 * @param pDevExt The device extension.
1218 * @param pSession The session.
1219 */
1220int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1221{
1222 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1223
1224 NOREF(iFunction);
1225 NOREF(pDevExt);
1226 NOREF(pSession);
1227 return VERR_NOT_SUPPORTED;
1228}
1229
1230
1231/**
1232 * Return the VMM device port.
1233 *
1234 * @returns IPRT status code.
1235 * @param pDevExt The device extension.
1236 * @param pInfo The request info.
1237 * @param pcbDataReturned (out) contains the number of bytes to return.
1238 */
1239static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1240{
1241 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1242 pInfo->portAddress = pDevExt->IOPortBase;
1243 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1244 if (pcbDataReturned)
1245 *pcbDataReturned = sizeof(*pInfo);
1246 return VINF_SUCCESS;
1247}
1248
1249
1250#ifndef RT_OS_WINDOWS
1251/**
1252 * Set the callback for the kernel mouse handler.
1253 *
1254 * @returns IPRT status code.
1255 * @param pDevExt The device extension.
1256 * @param pNotify The new callback information.
1257 * @note This function takes the session spinlock to update the callback
1258 * information, but the interrupt handler will not do this. To make
1259 * sure that the interrupt handler sees a consistent structure, we
1260 * set the function pointer to NULL before updating the data and only
1261 * set it to the correct value once the data is updated. Since the
1262 * interrupt handler executes atomically this ensures that the data is
1263 * valid if the function pointer is non-NULL.
1264 */
1265int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1266{
1267 Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1268
1269 RTSpinlockAcquire(pDevExt->EventSpinlock);
1270 pDevExt->MouseNotifyCallback = *pNotify;
1271 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1272
1273 /* Make sure no active ISR is referencing the old data - hacky but should be
1274 * effective. */
1275 while (pDevExt->cISR > 0)
1276 ASMNopPause();
1277
1278 return VINF_SUCCESS;
1279}
1280#endif
1281
1282
1283/**
1284 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
1285 *
1286 * The caller enters the spinlock, we leave it.
1287 *
1288 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1289 */
1290DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1291 int iEvent, const uint32_t fReqEvents)
1292{
1293 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1294 if (fMatches)
1295 {
1296 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1297 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1298
1299 pInfo->u32EventFlagsOut = fMatches;
1300 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1301 if (fReqEvents & ~((uint32_t)1 << iEvent))
1302 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1303 else
1304 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1305 return VINF_SUCCESS;
1306 }
1307 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1308 return VERR_TIMEOUT;
1309}
1310
1311
1312static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1313 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
1314{
1315 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
1316 uint32_t fResEvents;
1317 int iEvent;
1318 PVBOXGUESTWAIT pWait;
1319 int rc;
1320
1321 pInfo->u32EventFlagsOut = 0;
1322 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1323 if (pcbDataReturned)
1324 *pcbDataReturned = sizeof(*pInfo);
1325
1326 /*
1327 * Copy and verify the input mask.
1328 */
1329 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
1330 if (RT_UNLIKELY(iEvent < 0))
1331 {
1332 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
1333 return VERR_INVALID_PARAMETER;
1334 }
1335
1336 /*
1337 * Check the condition up front, before doing the wait-for-event allocations.
1338 */
1339 RTSpinlockAcquire(pDevExt->EventSpinlock);
1340 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents);
1341 if (rc == VINF_SUCCESS)
1342 return rc;
1343
1344 if (!pInfo->u32TimeoutIn)
1345 {
1346 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1347 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
1348 return VERR_TIMEOUT;
1349 }
1350
1351 pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
1352 if (!pWait)
1353 return VERR_NO_MEMORY;
1354 pWait->fReqEvents = fReqEvents;
1355
1356 /*
1357 * We've got the wait entry now, re-enter the spinlock and check for the condition.
1358 * If the wait condition is met, return.
1359 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
1360 */
1361 RTSpinlockAcquire(pDevExt->EventSpinlock);
1362 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
1363 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents);
1364 if (rc == VINF_SUCCESS)
1365 {
1366 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1367 return rc;
1368 }
1369
1370 if (fInterruptible)
1371 rc = RTSemEventMultiWaitNoResume(pWait->Event,
1372 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1373 else
1374 rc = RTSemEventMultiWait(pWait->Event,
1375 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1376
1377 /*
1378 * There is one special case here and that's when the semaphore is
1379 * destroyed upon device driver unload. This shouldn't happen of course,
1380 * but in case it does, just get out of here ASAP.
1381 */
1382 if (rc == VERR_SEM_DESTROYED)
1383 return rc;
1384
1385 /*
1386 * Unlink the wait item and dispose of it.
1387 */
1388 RTSpinlockAcquire(pDevExt->EventSpinlock);
1389 fResEvents = pWait->fResEvents;
1390 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1391 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1392
1393 /*
1394 * Now deal with the return code.
1395 */
1396 if ( fResEvents
1397 && fResEvents != UINT32_MAX)
1398 {
1399 pInfo->u32EventFlagsOut = fResEvents;
1400 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1401 if (fReqEvents & ~((uint32_t)1 << iEvent))
1402 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1403 else
1404 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1405 rc = VINF_SUCCESS;
1406 }
1407 else if ( fResEvents == UINT32_MAX
1408 || rc == VERR_INTERRUPTED)
1409 {
1410 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
1411 rc = VERR_INTERRUPTED;
1412 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
1413 }
1414 else if (rc == VERR_TIMEOUT)
1415 {
1416 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1417 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
1418 }
1419 else
1420 {
1421 if (RT_SUCCESS(rc))
1422 {
1423 static unsigned s_cErrors = 0;
1424 if (s_cErrors++ < 32)
1425 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
1426 rc = VERR_INTERNAL_ERROR;
1427 }
1428 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1429 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
1430 }
1431
1432 return rc;
1433}
1434
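/**
 * A minimal sketch of driving the wait worker above from kernel context,
 * waiting up to one second for the mouse-position-changed event.  The
 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED flag comes from VMMDev.h; ring-3
 * clients would normally reach this code through the WAITEVENT I/O control
 * rather than calling the worker directly.
 */
static int vboxGuestExampleWaitForMouseEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    VBoxGuestWaitEventInfo Info;
    int rc;

    Info.u32TimeoutIn     = 1000; /* milliseconds */
    Info.u32EventMaskIn   = VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
    Info.u32EventFlagsOut = 0;
    Info.u32Result        = 0;
    rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, &Info, NULL /* pcbDataReturned */,
                                        false /* fInterruptible */);
    if (RT_SUCCESS(rc))
        Log(("example: got events %#x\n", Info.u32EventFlagsOut));
    return rc;
}
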
1435
1436static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1437{
1438 PVBOXGUESTWAIT pWait;
1439 PVBOXGUESTWAIT pSafe;
1440 int rc = 0;
1441
1442 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1443
1444 /*
1445 * Walk the event list and wake up anyone with a matching session.
1446 */
1447 RTSpinlockAcquire(pDevExt->EventSpinlock);
1448 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
1449 {
1450 if (pWait->pSession == pSession)
1451 {
1452 pWait->fResEvents = UINT32_MAX;
1453 RTListNodeRemove(&pWait->ListNode);
1454#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1455 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
1456#else
1457 rc |= RTSemEventMultiSignal(pWait->Event);
1458 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1459#endif
1460 }
1461 }
1462 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1463 Assert(rc == 0);
1464
1465#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1466 VBoxGuestWaitDoWakeUps(pDevExt);
1467#endif
1468
1469 return VINF_SUCCESS;
1470}
1471
1472/**
1473 * Checks if the VMM request is allowed in the context of the given session.
1474 *
1475 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1476 * @param pSession The calling session.
1477 * @param enmType The request type.
1478 * @param pReqHdr The request.
1479 */
1480static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1481 VMMDevRequestHeader const *pReqHdr)
1482{
1483 /*
1484 * Categorize the request being made.
1485 */
1486 /** @todo This needs quite some more work! */
1487 enum
1488 {
1489 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1490 } enmRequired;
1491 switch (enmType)
1492 {
1493 /*
1494 * Deny access to anything we don't know or provide specialized I/O controls for.
1495 */
1496#ifdef VBOX_WITH_HGCM
1497 case VMMDevReq_HGCMConnect:
1498 case VMMDevReq_HGCMDisconnect:
1499# ifdef VBOX_WITH_64_BITS_GUESTS
1500 case VMMDevReq_HGCMCall32:
1501 case VMMDevReq_HGCMCall64:
1502# else
1503 case VMMDevReq_HGCMCall:
1504# endif /* VBOX_WITH_64_BITS_GUESTS */
1505 case VMMDevReq_HGCMCancel:
1506 case VMMDevReq_HGCMCancel2:
1507#endif /* VBOX_WITH_HGCM */
1508 default:
1509 enmRequired = kLevel_NoOne;
1510 break;
1511
1512 /*
1513 * There are a few things only this driver can do (and it doesn't use
1514 * the VMMRequest I/O control route anyway, but whatever).
1515 */
1516 case VMMDevReq_ReportGuestInfo:
1517 case VMMDevReq_ReportGuestInfo2:
1518 case VMMDevReq_GetHypervisorInfo:
1519 case VMMDevReq_SetHypervisorInfo:
1520 case VMMDevReq_RegisterPatchMemory:
1521 case VMMDevReq_DeregisterPatchMemory:
1522 case VMMDevReq_GetMemBalloonChangeRequest:
1523 enmRequired = kLevel_OnlyVBoxGuest;
1524 break;
1525
1526 /*
1527 * Trusted users apps only.
1528 */
1529 case VMMDevReq_QueryCredentials:
1530 case VMMDevReq_ReportCredentialsJudgement:
1531 case VMMDevReq_RegisterSharedModule:
1532 case VMMDevReq_UnregisterSharedModule:
1533 case VMMDevReq_WriteCoreDump:
1534 case VMMDevReq_GetCpuHotPlugRequest:
1535 case VMMDevReq_SetCpuHotPlugStatus:
1536 case VMMDevReq_CheckSharedModules:
1537 case VMMDevReq_GetPageSharingStatus:
1538 case VMMDevReq_DebugIsPageShared:
1539 case VMMDevReq_ReportGuestStats:
1540 case VMMDevReq_GetStatisticsChangeRequest:
1541 case VMMDevReq_ChangeMemBalloon:
1542 enmRequired = kLevel_TrustedUsers;
1543 break;
1544
1545 /*
1546 * Anyone.
1547 */
1548 case VMMDevReq_GetMouseStatus:
1549 case VMMDevReq_SetMouseStatus:
1550 case VMMDevReq_SetPointerShape:
1551 case VMMDevReq_GetHostVersion:
1552 case VMMDevReq_Idle:
1553 case VMMDevReq_GetHostTime:
1554 case VMMDevReq_SetPowerStatus:
1555 case VMMDevReq_AcknowledgeEvents:
1556 case VMMDevReq_CtlGuestFilterMask:
1557 case VMMDevReq_ReportGuestStatus:
1558 case VMMDevReq_GetDisplayChangeRequest:
1559 case VMMDevReq_VideoModeSupported:
1560 case VMMDevReq_GetHeightReduction:
1561 case VMMDevReq_GetDisplayChangeRequest2:
1562 case VMMDevReq_SetGuestCapabilities:
1563 case VMMDevReq_VideoModeSupported2:
1564 case VMMDevReq_VideoAccelEnable:
1565 case VMMDevReq_VideoAccelFlush:
1566 case VMMDevReq_VideoSetVisibleRegion:
1567 case VMMDevReq_GetDisplayChangeRequestEx:
1568 case VMMDevReq_GetSeamlessChangeRequest:
1569 case VMMDevReq_GetVRDPChangeRequest:
1570 case VMMDevReq_LogString:
1571 case VMMDevReq_GetSessionId:
1572 enmRequired = kLevel_AllUsers;
1573 break;
1574
1575 /*
1576 * Depends on the request parameters...
1577 */
1578 /** @todo this has to be changed into an I/O control and the facilities
1579 * tracked in the session so they can automatically be failed when the
1580 * session terminates without reporting the new status.
1581 *
1582 * The information presented by IGuest is not reliable without this! */
1583 case VMMDevReq_ReportGuestCapabilities:
1584 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1585 {
1586 case VBoxGuestFacilityType_All:
1587 case VBoxGuestFacilityType_VBoxGuestDriver:
1588 enmRequired = kLevel_OnlyVBoxGuest;
1589 break;
1590 case VBoxGuestFacilityType_VBoxService:
1591 enmRequired = kLevel_TrustedUsers;
1592 break;
1593 case VBoxGuestFacilityType_VBoxTrayClient:
1594 case VBoxGuestFacilityType_Seamless:
1595 case VBoxGuestFacilityType_Graphics:
1596 default:
1597 enmRequired = kLevel_AllUsers;
1598 break;
1599 }
1600 break;
1601 }
1602
1603 /*
1604 * Check against the session.
1605 */
1606 switch (enmRequired)
1607 {
1608 default:
1609 case kLevel_NoOne:
1610 break;
1611 case kLevel_OnlyVBoxGuest:
1612 case kLevel_OnlyKernel:
1613 if (pSession->R0Process == NIL_RTR0PROCESS)
1614 return VINF_SUCCESS;
1615 break;
1616 case kLevel_TrustedUsers:
1617 case kLevel_AllUsers:
1618 return VINF_SUCCESS;
1619 }
1620
1621 return VERR_PERMISSION_DENIED;
1622}
1623
1624static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1625 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
1626{
1627 int rc;
1628 VMMDevRequestHeader *pReqCopy;
1629
1630 /*
1631 * Validate the header and request size.
1632 */
1633 const VMMDevRequestType enmType = pReqHdr->requestType;
1634 const uint32_t cbReq = pReqHdr->size;
1635 const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
1636
1637 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
1638
1639 if (cbReq < cbMinSize)
1640 {
1641 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
1642 cbReq, cbMinSize, enmType));
1643 return VERR_INVALID_PARAMETER;
1644 }
1645 if (cbReq > cbData)
1646 {
1647 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1648 cbData, cbReq, enmType));
1649 return VERR_INVALID_PARAMETER;
1650 }
1651 rc = VbglGRVerify(pReqHdr, cbData);
1652 if (RT_FAILURE(rc))
1653 {
1654 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
1655 cbData, cbReq, enmType, rc));
1656 return rc;
1657 }
1658
1659 rc = VBoxGuestCheckIfVMMReqAllowed(pSession, enmType, pReqHdr);
1660 if (RT_FAILURE(rc))
1661 {
1662 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
1663 return rc;
1664 }
1665
1666 /*
1667 * Make a copy of the request in the physical memory heap so
1668 * the VBoxGuestLibrary can more easily deal with the request.
1669 * (This is really a waste of time since the OS or the OS specific
1670 * code has already buffered or locked the input/output buffer, but
1671 * it does make things a bit simpler wrt the phys address.)
1672 */
1673 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1674 if (RT_FAILURE(rc))
1675 {
1676 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1677 cbReq, cbReq, rc));
1678 return rc;
1679 }
1680 memcpy(pReqCopy, pReqHdr, cbReq);
1681
1682 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1683 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1684
1685 rc = VbglGRPerform(pReqCopy);
1686 if ( RT_SUCCESS(rc)
1687 && RT_SUCCESS(pReqCopy->rc))
1688 {
1689 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1690 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1691
1692 memcpy(pReqHdr, pReqCopy, cbReq);
1693 if (pcbDataReturned)
1694 *pcbDataReturned = cbReq;
1695 }
1696 else if (RT_FAILURE(rc))
1697 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1698 else
1699 {
1700 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1701 rc = pReqCopy->rc;
1702 }
1703
1704 VbglGRFree(pReqCopy);
1705 return rc;
1706}
1707
1708
1709static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1710{
1711 VMMDevCtlGuestFilterMask *pReq;
1712 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1713 if (RT_FAILURE(rc))
1714 {
1715 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1716 sizeof(*pReq), sizeof(*pReq), rc));
1717 return rc;
1718 }
1719
1720 pReq->u32OrMask = pInfo->u32OrMask;
1721 pReq->u32NotMask = pInfo->u32NotMask;
1722 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1723 rc = VbglGRPerform(&pReq->header);
1724 if (RT_FAILURE(rc))
1725 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1726
1727 VbglGRFree(&pReq->header);
1728 return rc;
1729}
1730
1731#ifdef VBOX_WITH_HGCM
1732
1733AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1734
1735/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
1736static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
1737 bool fInterruptible, uint32_t cMillies)
1738{
1739 int rc;
1740
1741 /*
1742 * Check to see if the condition was met by the time we got here.
1743 *
1744 * We create a simple poll loop here for dealing with out-of-memory
1745 * conditions since the caller isn't necessarily able to deal with
1746 * us returning too early.
1747 */
1748 PVBOXGUESTWAIT pWait;
1749 for (;;)
1750 {
1751 RTSpinlockAcquire(pDevExt->EventSpinlock);
1752 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1753 {
1754 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1755 return VINF_SUCCESS;
1756 }
1757 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1758
1759 pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
1760 if (pWait)
1761 break;
1762 if (fInterruptible)
1763 return VERR_INTERRUPTED;
1764 RTThreadSleep(1);
1765 }
1766 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
1767 pWait->pHGCMReq = pHdr;
1768
1769 /*
1770 * Re-enter the spinlock and re-check for the condition.
1771 * If the condition is met, return.
1772 * Otherwise link us into the HGCM wait list and go to sleep.
1773 */
1774 RTSpinlockAcquire(pDevExt->EventSpinlock);
1775 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
1776 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1777 {
1778 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1779 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1780 return VINF_SUCCESS;
1781 }
1782 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1783
1784 if (fInterruptible)
1785 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
1786 else
1787 rc = RTSemEventMultiWait(pWait->Event, cMillies);
1788 if (rc == VERR_SEM_DESTROYED)
1789 return rc;
1790
1791 /*
1792 * Unlink, free and return.
1793 */
1794 if ( RT_FAILURE(rc)
1795 && rc != VERR_TIMEOUT
1796 && ( !fInterruptible
1797 || rc != VERR_INTERRUPTED))
1798 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
1799
1800 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1801 return rc;
1802}
1803
1804
1805/**
1806 * This is a callback for dealing with async waits.
1807 *
1808 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1809 */
1810static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1811{
1812 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1813 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1814 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1815 pDevExt,
1816 false /* fInterruptible */,
1817 u32User /* cMillies */);
1818}
1819
1820
1821/**
1822 * This is a callback for dealing with async waits that can be interrupted.
1823 *
1824 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1825 */
1826static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1827 void *pvUser, uint32_t u32User)
1828{
1829 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1830 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1831 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1832 pDevExt,
1833 true /* fInterruptible */,
1834 u32User /* cMillies */);
1835
1836}
1837
1838
1839static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1840 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
1841{
1842 int rc;
1843
1844 /*
1845 * The VbglHGCMConnect call will invoke the callback if the HGCM
1846 * call is performed in an ASYNC fashion. The function is not able
1847 * to deal with cancelled requests.
1848 */
1849 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
1850 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
1851 ? pInfo->Loc.u.host.achName : "<not local host>"));
1852
1853 rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1854 if (RT_SUCCESS(rc))
1855 {
1856 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
1857 pInfo->u32ClientID, pInfo->result, rc));
1858 if (RT_SUCCESS(pInfo->result))
1859 {
1860 /*
1861 * Append the client id to the client id table.
1862 * If the table has somehow become filled up, we'll disconnect the client again.
1863 */
1864 unsigned i;
1865 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1866 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1867 if (!pSession->aHGCMClientIds[i])
1868 {
1869 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1870 break;
1871 }
1872 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1873 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1874 {
1875 static unsigned s_cErrors = 0;
1876 VBoxGuestHGCMDisconnectInfo Info;
1877
1878 if (s_cErrors++ < 32)
1879 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1880
1881 Info.result = 0;
1882 Info.u32ClientID = pInfo->u32ClientID;
1883 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1884 return VERR_TOO_MANY_OPEN_FILES;
1885 }
1886 }
1887 if (pcbDataReturned)
1888 *pcbDataReturned = sizeof(*pInfo);
1889 }
1890 return rc;
1891}
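/*
 * Editor's note: illustrative, editor-added sketch (excluded from the build)
 * of a kernel caller connecting to a host HGCM service through the handler
 * above.  The helper name and the service name "ExampleSvc" are placeholders;
 * only the structure fields used by the handler above are relied upon.
 */
#if 0 /* illustrative sketch only */
static int exampleHGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t *pu32ClientId)
{
    VBoxGuestHGCMConnectInfo Info;
    int rc;

    RT_ZERO(Info);
    Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;   /* the service runs on the host side */
    RTStrCopy(Info.Loc.u.host.achName, sizeof(Info.Loc.u.host.achName), "ExampleSvc");

    rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, &Info, NULL /*pcbDataReturned*/);
    if (RT_SUCCESS(rc) && RT_SUCCESS(Info.result))
        *pu32ClientId = Info.u32ClientID;               /* keep it for HGCM_CALL / HGCM_DISCONNECT */
    return RT_FAILURE(rc) ? rc : Info.result;
}
#endif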
1892
1893
1894static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1895 size_t *pcbDataReturned)
1896{
1897 /*
1898 * Validate the client id and invalidate its entry while we're in the call.
1899 */
1900 int rc;
1901 const uint32_t u32ClientId = pInfo->u32ClientID;
1902 unsigned i;
1903 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1904 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1905 if (pSession->aHGCMClientIds[i] == u32ClientId)
1906 {
1907 pSession->aHGCMClientIds[i] = UINT32_MAX;
1908 break;
1909 }
1910 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1911 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1912 {
1913 static unsigned s_cErrors = 0;
1914 if (s_cErrors++ < 32)
1915 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: Invalid handle. u32Client=%RX32\n", u32ClientId));
1916 return VERR_INVALID_HANDLE;
1917 }
1918
1919 /*
1920 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
1921 * call is performed in an ASYNC fashion. The function is not able
1922 * to deal with cancelled requests.
1923 */
1924 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1925 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1926 if (RT_SUCCESS(rc))
1927 {
1928 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1929 if (pcbDataReturned)
1930 *pcbDataReturned = sizeof(*pInfo);
1931 }
1932
1933 /* Update the client id array according to the result. */
1934 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1935 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1936 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1937 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1938
1939 return rc;
1940}
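/*
 * Editor's note: illustrative, editor-added counterpart to the connect sketch
 * above (excluded from the build): handing a previously returned client id
 * back to the disconnect handler.  The helper name is a placeholder.
 */
#if 0 /* illustrative sketch only */
static int exampleHGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t u32ClientId)
{
    VBoxGuestHGCMDisconnectInfo Info;
    int rc;

    Info.u32ClientID = u32ClientId;     /* the id handed out by the connect call */
    Info.result      = VINF_SUCCESS;

    rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, &Info, NULL /*pcbDataReturned*/);
    return RT_FAILURE(rc) ? rc : Info.result;
}
#endif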
1941
1942
1943static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1944 PVBOXGUESTSESSION pSession,
1945 VBoxGuestHGCMCallInfo *pInfo,
1946 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
1947 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1948{
1949 const uint32_t u32ClientId = pInfo->u32ClientID;
1950 uint32_t fFlags;
1951 size_t cbActual;
1952 unsigned i;
1953 int rc;
1954
1955 /*
1956 * Some more validations.
1957 */
1958 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1959 {
1960 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1961 return VERR_INVALID_PARAMETER;
1962 }
1963
1964 cbActual = cbExtra + sizeof(*pInfo);
1965#ifdef RT_ARCH_AMD64
1966 if (f32bit)
1967 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1968 else
1969#endif
1970 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1971 if (cbData < cbActual)
1972 {
1973 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1974 cbData, cbData, cbActual, cbActual));
1975 return VERR_INVALID_PARAMETER;
1976 }
1977
1978 /*
1979 * Validate the client id.
1980 */
1981 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1982 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1983 if (pSession->aHGCMClientIds[i] == u32ClientId)
1984 break;
1985 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1986 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1987 {
1988 static unsigned s_cErrors = 0;
1989 if (s_cErrors++ < 32)
1990 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1991 return VERR_INVALID_HANDLE;
1992 }
1993
1994 /*
1995 * The VbglHGCMCall call will invoke the callback if the HGCM
1996 * call is performed in an ASYNC fashion. This function can
1997 * deal with cancelled requests, so we let user mode requests
1998 * be interruptible (should add a flag for this later I guess).
1999 */
2000 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2001 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2002#ifdef RT_ARCH_AMD64
2003 if (f32bit)
2004 {
2005 if (fInterruptible)
2006 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2007 else
2008 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2009 }
2010 else
2011#endif
2012 {
2013 if (fInterruptible)
2014 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2015 else
2016 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2017 }
2018 if (RT_SUCCESS(rc))
2019 {
2020 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2021 if (pcbDataReturned)
2022 *pcbDataReturned = cbActual;
2023 }
2024 else
2025 {
2026 if ( rc != VERR_INTERRUPTED
2027 && rc != VERR_TIMEOUT)
2028 {
2029 static unsigned s_cErrors = 0;
2030 if (s_cErrors++ < 32)
2031 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2032 }
2033 else
2034 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2035 }
2036 return rc;
2037}
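/*
 * Editor's note: illustrative, editor-added sketch (excluded from the build)
 * of how a caller sizes the HGCM_CALL buffer so it passes the cbActual check
 * above: the fixed VBoxGuestHGCMCallInfo header is immediately followed by
 * cParms HGCMFunctionParameter structures.  The helper name is a placeholder
 * and the function number / parameter contents are deliberately left out.
 */
#if 0 /* illustrative sketch only */
static int exampleHGCMCallTwoParms(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t u32ClientId)
{
    struct
    {
        VBoxGuestHGCMCallInfo Info;      /* fixed header */
        HGCMFunctionParameter aParms[2]; /* cParms parameter blocks follow it directly */
    } Req;

    RT_ZERO(Req);
    Req.Info.u32ClientID = u32ClientId;
    Req.Info.cParms      = RT_ELEMENTS(Req.aParms);
    /* ... set the function number and fill Req.aParms[0..1] before the call ... */

    return VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &Req.Info, RT_INDEFINITE_WAIT,
                                         false /*fInterruptible*/, false /*f32bit*/, false /*fUserData*/,
                                         0 /*cbExtra*/, sizeof(Req), NULL /*pcbDataReturned*/);
}
#endif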
2038
2039
2040#endif /* VBOX_WITH_HGCM */
2041
2042/**
2043 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2044 *
2045 * Ask the host for the size of the balloon and try to set it accordingly. If
2046 * this approach fails because it's not supported, return with fHandleInR3 set
2047 * and let the user land supply memory we can lock via the other ioctl.
2048 *
2049 * @returns VBox status code.
2050 *
2051 * @param pDevExt The device extension.
2052 * @param pSession The session.
2053 * @param pInfo The output buffer.
2054 * @param pcbDataReturned Where to store the amount of returned data. Can
2055 * be NULL.
2056 */
2057static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2058 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2059{
2060 VMMDevGetMemBalloonChangeRequest *pReq;
2061 int rc;
2062
2063 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2064 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2065 AssertRCReturn(rc, rc);
2066
2067 /*
2068 * The first user trying to query/change the balloon becomes the
2069 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2070 */
2071 if ( pDevExt->MemBalloon.pOwner != pSession
2072 && pDevExt->MemBalloon.pOwner == NULL)
2073 pDevExt->MemBalloon.pOwner = pSession;
2074
2075 if (pDevExt->MemBalloon.pOwner == pSession)
2076 {
2077 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2078 if (RT_SUCCESS(rc))
2079 {
2080 /*
2081 * This is a response to the VMMDEV_EVENT_BALLOON_CHANGE_REQUEST event. Setting this bit means that
2082 * we request the value from the host and change the guest memory
2083 * balloon according to this value.
2084 */
2085 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2086 rc = VbglGRPerform(&pReq->header);
2087 if (RT_SUCCESS(rc))
2088 {
2089 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2090 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2091
2092 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2093 pInfo->fHandleInR3 = false;
2094
2095 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2096 /* Ignore various out of memory failures. */
2097 if ( rc == VERR_NO_MEMORY
2098 || rc == VERR_NO_PHYS_MEMORY
2099 || rc == VERR_NO_CONT_MEMORY)
2100 rc = VINF_SUCCESS;
2101
2102 if (pcbDataReturned)
2103 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2104 }
2105 else
2106 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2107 VbglGRFree(&pReq->header);
2108 }
2109 }
2110 else
2111 rc = VERR_PERMISSION_DENIED;
2112
2113 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2114 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2115 return rc;
2116}
2117
2118
2119/**
2120 * Handle a request for changing the memory balloon.
2121 *
2122 * @returns VBox status code.
2123 *
2124 * @param pDevExt The device extension.
2125 * @param pSession The session.
2126 * @param pInfo The change request structure (input).
2127 * @param pcbDataReturned Where to store the amount of returned data. Can
2128 * be NULL.
2129 */
2130static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2131 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2132{
2133 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2134 AssertRCReturn(rc, rc);
2135
2136 if (!pDevExt->MemBalloon.fUseKernelAPI)
2137 {
2138 /*
2139 * The first user trying to query/change the balloon becomes the
2140 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2141 */
2142 if ( pDevExt->MemBalloon.pOwner != pSession
2143 && pDevExt->MemBalloon.pOwner == NULL)
2144 pDevExt->MemBalloon.pOwner = pSession;
2145
2146 if (pDevExt->MemBalloon.pOwner == pSession)
2147 {
2148 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2149 if (pcbDataReturned)
2150 *pcbDataReturned = 0;
2151 }
2152 else
2153 rc = VERR_PERMISSION_DENIED;
2154 }
2155 else
2156 rc = VERR_PERMISSION_DENIED;
2157
2158 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2159 return rc;
2160}
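/*
 * Editor's note: illustrative, editor-added sketch (excluded from the build)
 * of the flow described in the CHECK_BALLOON handler above: query the balloon
 * and, when fHandleInR3 comes back set, let a user-land service push chunk
 * addresses through CHANGE_BALLOON.  The helper name and the single-chunk
 * handling are placeholders; a real service would loop over all chunks.
 */
#if 0 /* illustrative sketch only */
static int exampleBalloonRound(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint64_t u64UserChunkAddr)
{
    VBoxGuestCheckBalloonInfo CheckInfo;
    int rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, &CheckInfo, NULL /*pcbDataReturned*/);
    if (RT_SUCCESS(rc) && CheckInfo.fHandleInR3 && CheckInfo.cBalloonChunks > 0)
    {
        /* The kernel could not do it itself: hand it a locked user-land chunk. */
        VBoxGuestChangeBalloonInfo ChangeInfo;
        ChangeInfo.u64ChunkAddr = u64UserChunkAddr;   /* address of a chunk allocated in user land */
        ChangeInfo.fInflate     = true;               /* add the chunk to the balloon */
        rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, &ChangeInfo, NULL /*pcbDataReturned*/);
    }
    return rc;
}
#endif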
2161
2162
2163/**
2164 * Handle a request for writing a core dump of the guest on the host.
2165 *
2166 * @returns VBox status code.
2167 *
2168 * @param pDevExt The device extension.
2169 * @param pInfo The input buffer (core dump flags).
2170 */
2171static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2172{
2173 VMMDevReqWriteCoreDump *pReq = NULL;
2174 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2175 if (RT_FAILURE(rc))
2176 {
2177 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2178 sizeof(*pReq), sizeof(*pReq), rc));
2179 return rc;
2180 }
2181
2182 pReq->fFlags = pInfo->fFlags;
2183 rc = VbglGRPerform(&pReq->header);
2184 if (RT_FAILURE(rc))
2185 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2186
2187 VbglGRFree(&pReq->header);
2188 return rc;
2189}
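/*
 * Editor's note: illustrative, editor-added sketch (excluded from the build)
 * of triggering the WRITE_CORE_DUMP handler above.  The helper name is a
 * placeholder and the zero flags value simply means "no special behaviour".
 */
#if 0 /* illustrative sketch only */
static int exampleWriteCoreDump(PVBOXGUESTDEVEXT pDevExt)
{
    VBoxGuestWriteCoreDump Info;
    Info.fFlags = 0;    /* no flags requested in this sketch */
    return VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, &Info);
}
#endif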
2190
2191
2192#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2193/**
2194 * Enables the VRDP session and saves its session ID.
2195 *
2196 * @returns VBox status code.
2197 *
2198 * @param pDevExt The device extension.
2199 * @param pSession The session.
2200 */
2201 static int VBoxGuestCommonIOCtl_EnableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2202{
2203 /* Nothing to do here right now, since this is only supported on Windows at the moment. */
2204 return VERR_NOT_IMPLEMENTED;
2205}
2206
2207
2208/**
2209 * Disables the VRDP session.
2210 *
2211 * @returns VBox status code.
2212 *
2213 * @param pDevExt The device extension.
2214 * @param pSession The session.
2215 */
2216 static int VBoxGuestCommonIOCtl_DisableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2217{
2218 /* Nothing to do here right now, since this is only supported on Windows at the moment. */
2219 return VERR_NOT_IMPLEMENTED;
2220}
2221#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2222
2223#ifdef DEBUG
2224/** Unit test SetMouseStatus instead of really executing the request. */
2225static bool g_test_fSetMouseStatus = false;
2226/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
2227static int g_test_SetMouseStatusGRRC;
2228/** When unit testing SetMouseStatus this will be set to the status passed to
2229 * the GR. */
2230static uint32_t g_test_statusSetMouseStatus;
2231#endif
2232
2233static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
2234{
2235 VMMDevReqMouseStatus *pReq;
2236 int rc;
2237
2238 LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
2239 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2240 if (RT_SUCCESS(rc))
2241 {
2242 pReq->mouseFeatures = fFeatures;
2243 pReq->pointerXPos = 0;
2244 pReq->pointerYPos = 0;
2245#ifdef DEBUG
2246 if (g_test_fSetMouseStatus)
2247 {
2248 g_test_statusSetMouseStatus = pReq->mouseFeatures;
2249 rc = g_test_SetMouseStatusGRRC;
2250 }
2251 else
2252#endif
2253 rc = VbglGRPerform(&pReq->header);
2254 VbglGRFree(&pReq->header);
2255 }
2256 LogRelFlowFunc(("rc=%Rrc\n", rc));
2257 return rc;
2258}
2259
2260
2261/**
2262 * Sets the mouse status features for this session and updates them
2263 * globally. We aim to ensure that if several threads call this in
2264 * parallel the most recent status will always end up being set.
2265 *
2266 * @returns VBox status code.
2267 *
2268 * @param pDevExt The device extension.
2269 * @param pSession The session.
2270 * @param fFeatures New bitmap of enabled features.
2271 */
2272static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2273{
2274 uint32_t fNewDevExtStatus = 0;
2275 unsigned i;
2276 int rc;
2277 /* Exit early if nothing has changed - hack to work around the
2278 * Windows Additions not using the common code. */
2279 bool fNoAction;
2280
2281 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2282
2283 for (i = 0; i < sizeof(fFeatures) * 8; i++)
2284 {
2285 if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
2286 {
2287 if ( (RT_BIT_32(i) & fFeatures)
2288 && !(RT_BIT_32(i) & pSession->fMouseStatus))
2289 pDevExt->acMouseFeatureUsage[i]++;
2290 else if ( !(RT_BIT_32(i) & fFeatures)
2291 && (RT_BIT_32(i) & pSession->fMouseStatus))
2292 pDevExt->acMouseFeatureUsage[i]--;
2293 }
2294 if (pDevExt->acMouseFeatureUsage[i] > 0)
2295 fNewDevExtStatus |= RT_BIT_32(i);
2296 }
2297
2298 pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
2299 fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
2300 pDevExt->fMouseStatus = fNewDevExtStatus;
2301
2302 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2303 if (fNoAction)
2304 return VINF_SUCCESS;
2305
2306 do
2307 {
2308 fNewDevExtStatus = pDevExt->fMouseStatus;
2309 rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
2310 } while ( RT_SUCCESS(rc)
2311 && fNewDevExtStatus != pDevExt->fMouseStatus);
2312
2313 return rc;
2314}
2315
2316
2317#ifdef DEBUG
2318/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2319 * the code in question it probably makes most sense to keep it next to the
2320 * code. */
2321static void testSetMouseStatus(void)
2322{
2323 uint32_t u32Data;
2324 int rc;
2325 RTSPINLOCK Spinlock;
2326
2327 g_test_fSetMouseStatus = true;
2328 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2329 AssertRCReturnVoid(rc);
2330 {
2331 VBOXGUESTDEVEXT DevExt = { 0 };
2332 VBOXGUESTSESSION Session = { 0 };
2333
2334 g_test_statusSetMouseStatus = ~0;
2335 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2336 DevExt.SessionSpinlock = Spinlock;
2337 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2338 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2339 &Session, &u32Data, sizeof(u32Data), NULL);
2340 AssertRCSuccess(rc);
2341 AssertMsg( g_test_statusSetMouseStatus
2342 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2343 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2344 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2345 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2346 &Session, &u32Data, sizeof(u32Data), NULL);
2347 AssertRCSuccess(rc);
2348 AssertMsg( g_test_statusSetMouseStatus
2349 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2350 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2351 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2352 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2353 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2354 &Session, &u32Data, sizeof(u32Data), NULL);
2355 AssertRCSuccess(rc);
2356 AssertMsg( g_test_statusSetMouseStatus
2357 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2358 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2359 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2360 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2361 &Session, &u32Data, sizeof(u32Data), NULL);
2362 AssertRCSuccess(rc);
2363 AssertMsg( g_test_statusSetMouseStatus
2364 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2365 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2366 u32Data = 0;
2367 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2368 &Session, &u32Data, sizeof(u32Data), NULL);
2369 AssertRCSuccess(rc);
2370 AssertMsg( g_test_statusSetMouseStatus
2371 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2372 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2373 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2374 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2375 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2376 /* This should succeed as the host request should not be made
2377 * since nothing has changed. */
2378 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2379 &Session, &u32Data, sizeof(u32Data), NULL);
2380 AssertRCSuccess(rc);
2381 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2382 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2383 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2384 &Session, &u32Data, sizeof(u32Data), NULL);
2385 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2386 /* Untested paths: out of memory; race setting status to host */
2387 }
2388 RTSpinlockDestroy(Spinlock);
2389 g_test_fSetMouseStatus = false;
2390}
2391#endif
2392
2393
2394/**
2395 * Guest backdoor logging.
2396 *
2397 * @returns VBox status code.
2398 *
2399 * @param pDevExt The device extension.
2400 * @param pch The log message (need not be NULL terminated).
2401 * @param cbData Size of the buffer.
2402 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2403 */
2404static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2405{
2406 NOREF(pch);
2407 NOREF(cbData);
2408 if (pDevExt->fLoggingEnabled)
2409 RTLogBackdoorPrintf("%.*s", cbData, pch);
2410 else
2411 Log(("%.*s", cbData, pch));
2412 if (pcbDataReturned)
2413 *pcbDataReturned = 0;
2414 return VINF_SUCCESS;
2415}
2416
2417
2418/**
2419 * Common IOCtl for user to kernel and kernel to kernel communication.
2420 *
2421 * This function only does the basic validation and then invokes
2422 * worker functions that take care of each specific function.
2423 *
2424 * @returns VBox status code.
2425 *
2426 * @param iFunction The requested function.
2427 * @param pDevExt The device extension.
2428 * @param pSession The client session.
2429 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2430 * @param cbData The max size of the data buffer.
2431 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2432 */
2433int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2434 void *pvData, size_t cbData, size_t *pcbDataReturned)
2435{
2436 int rc;
2437 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2438 iFunction, pDevExt, pSession, pvData, cbData));
2439
2440 /*
2441 * Make sure the returned data size is set to zero.
2442 */
2443 if (pcbDataReturned)
2444 *pcbDataReturned = 0;
2445
2446 /*
2447 * Define some helper macros to simplify validation.
2448 */
2449#define CHECKRET_RING0(mnemonic) \
2450 do { \
2451 if (pSession->R0Process != NIL_RTR0PROCESS) \
2452 { \
2453 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2454 pSession->Process, (uintptr_t)pSession->R0Process)); \
2455 return VERR_PERMISSION_DENIED; \
2456 } \
2457 } while (0)
2458#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2459 do { \
2460 if (cbData < (cbMin)) \
2461 { \
2462 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2463 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2464 return VERR_BUFFER_OVERFLOW; \
2465 } \
2466 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2467 { \
2468 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2469 return VERR_INVALID_POINTER; \
2470 } \
2471 } while (0)
2472#define CHECKRET_SIZE(mnemonic, cb) \
2473 do { \
2474 if (cbData != (cb)) \
2475 { \
2476 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2477 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2478 return VERR_BUFFER_OVERFLOW; \
2479 } \
2480 if ((cb) != 0 && !VALID_PTR(pvData)) \
2481 { \
2482 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2483 return VERR_INVALID_POINTER; \
2484 } \
2485 } while (0)
2486
2487
2488 /*
2489 * Deal with variably sized requests first.
2490 */
2491 rc = VINF_SUCCESS;
2492 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2493 {
2494 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2495 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2496 }
2497#ifdef VBOX_WITH_HGCM
2498 /*
2499 * These ones are a bit tricky.
2500 */
2501 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2502 {
2503 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2504 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2505 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2506 fInterruptible, false /*f32bit*/, false /* fUserData */,
2507 0, cbData, pcbDataReturned);
2508 }
2509 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2510 {
2511 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2512 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2513 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2514 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2515 false /*f32bit*/, false /* fUserData */,
2516 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2517 }
2518 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2519 {
2520 bool fInterruptible = true;
2521 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2522 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2523 fInterruptible, false /*f32bit*/, true /* fUserData */,
2524 0, cbData, pcbDataReturned);
2525 }
2526# ifdef RT_ARCH_AMD64
2527 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2528 {
2529 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2530 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2531 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2532 fInterruptible, true /*f32bit*/, false /* fUserData */,
2533 0, cbData, pcbDataReturned);
2534 }
2535 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2536 {
2537 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2538 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2539 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2540 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2541 true /*f32bit*/, false /* fUserData */,
2542 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2543 }
2544# endif
2545#endif /* VBOX_WITH_HGCM */
2546 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2547 {
2548 CHECKRET_MIN_SIZE("LOG", 1);
2549 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2550 }
2551 else
2552 {
2553 switch (iFunction)
2554 {
2555 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2556 CHECKRET_RING0("GETVMMDEVPORT");
2557 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2558 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2559 break;
2560
2561#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2562 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2563 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2564 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2565 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2566 break;
2567#endif
2568
2569 case VBOXGUEST_IOCTL_WAITEVENT:
2570 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2571 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2572 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2573 break;
2574
2575 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2576 if (cbData != 0)
2577 rc = VERR_INVALID_PARAMETER;
2578 else rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2579 break;
2580
2581 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2582 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2583 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2584 break;
2585
2586#ifdef VBOX_WITH_HGCM
2587 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2588# ifdef RT_ARCH_AMD64
2589 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2590# endif
2591 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2592 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2593 break;
2594
2595 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2596# ifdef RT_ARCH_AMD64
2597 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2598# endif
2599 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2600 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2601 break;
2602#endif /* VBOX_WITH_HGCM */
2603
2604 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2605 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2606 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2607 break;
2608
2609 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2610 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2611 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2612 break;
2613
2614 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2615 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2616 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2617 break;
2618
2619#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2620 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2621 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2622 break;
2623
2624 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2625 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2626 break;
2627#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2628 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2629 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2630 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2631 *(uint32_t *)pvData);
2632 break;
2633
2634#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
2635 case VBOXGUEST_IOCTL_DPC_LATENCY_CHECKER:
2636 CHECKRET_SIZE("DPC_LATENCY_CHECKER", 0);
2637 rc = VbgdNtIOCtl_DpcLatencyChecker();
2638 break;
2639#endif
2640
2641 default:
2642 {
2643 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2644 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2645 rc = VERR_NOT_SUPPORTED;
2646 break;
2647 }
2648 }
2649 }
2650
2651 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2652 return rc;
2653}
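/*
 * Editor's note: illustrative, editor-added sketch (excluded from the build)
 * of how an OS specific ioctl entry point typically forwards to
 * VBoxGuestCommonIOCtl above.  The entry point name is a placeholder and the
 * buffering / session lookup details differ for every port.
 */
#if 0 /* illustrative sketch only */
static int exampleNativeIOCtl(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                              unsigned iFunction, void *pvData, size_t cbData)
{
    size_t cbDataReturned = 0;
    int rc = VBoxGuestCommonIOCtl(iFunction, pDevExt, pSession, pvData, cbData, &cbDataReturned);
    /* A real port would now copy cbDataReturned bytes back to the caller and
       translate rc into the platform's native error convention. */
    NOREF(cbDataReturned);
    return rc;
}
#endif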
2654
2655
2656
2657/**
2658 * Common interrupt service routine.
2659 *
2660 * This deals with events and with waking up thread waiting for those events.
2661 *
2662 * @returns true if it was our interrupt, false if it wasn't.
2663 * @param pDevExt The VBoxGuest device extension.
2664 */
2665bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2666{
2667#ifndef RT_OS_WINDOWS
2668 VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
2669#endif
2670 bool fMousePositionChanged = false;
2671 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2672 int rc = 0;
2673 bool fOurIrq;
2674
2675 /*
2676 * Make sure we've initialized the device extension.
2677 */
2678 if (RT_UNLIKELY(!pReq))
2679 return false;
2680
2681 /*
2682 * Enter the spinlock, increase the ISR count and check if it's our IRQ or
2683 * not.
2684 */
2685 RTSpinlockAcquire(pDevExt->EventSpinlock);
2686 ASMAtomicIncU32(&pDevExt->cISR);
2687 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2688 if (fOurIrq)
2689 {
2690 /*
2691 * Acknowledge events.
2692 * We don't use VbglGRPerform here as it may take other spinlocks.
2693 */
2694 pReq->header.rc = VERR_INTERNAL_ERROR;
2695 pReq->events = 0;
2696 ASMCompilerBarrier();
2697 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2698 ASMCompilerBarrier(); /* paranoia */
2699 if (RT_SUCCESS(pReq->header.rc))
2700 {
2701 uint32_t fEvents = pReq->events;
2702 PVBOXGUESTWAIT pWait;
2703 PVBOXGUESTWAIT pSafe;
2704
2705 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2706
2707 /*
2708 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2709 */
2710 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2711 {
2712#ifndef RT_OS_WINDOWS
2713 MouseNotifyCallback = pDevExt->MouseNotifyCallback;
2714#endif
2715 fMousePositionChanged = true;
2716 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2717 }
2718
2719#ifdef VBOX_WITH_HGCM
2720 /*
2721 * The HGCM wait list is kind of different in that we evaluate all entries.
2722 */
2723 if (fEvents & VMMDEV_EVENT_HGCM)
2724 {
2725 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2726 {
2727 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2728 {
2729 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2730 RTListNodeRemove(&pWait->ListNode);
2731# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2732 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2733# else
2734 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2735 rc |= RTSemEventMultiSignal(pWait->Event);
2736# endif
2737 }
2738 }
2739 fEvents &= ~VMMDEV_EVENT_HGCM;
2740 }
2741#endif
2742
2743 /*
2744 * Normal FIFO waiter evaluation.
2745 */
2746 fEvents |= pDevExt->f32PendingEvents;
2747 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2748 {
2749 if ( (pWait->fReqEvents & fEvents)
2750 && !pWait->fResEvents)
2751 {
2752 pWait->fResEvents = pWait->fReqEvents & fEvents;
2753 fEvents &= ~pWait->fResEvents;
2754 RTListNodeRemove(&pWait->ListNode);
2755#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2756 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2757#else
2758 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2759 rc |= RTSemEventMultiSignal(pWait->Event);
2760#endif
2761 if (!fEvents)
2762 break;
2763 }
2764 }
2765 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2766 }
2767 else /* something is seriously wrong... */
2768 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2769 pReq->header.rc, pReq->events));
2770 }
2771 else
2772 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2773
2774 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2775
2776#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2777 /*
2778 * Do wake-ups.
2779 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2780 * care of it.
2781 */
2782 VBoxGuestWaitDoWakeUps(pDevExt);
2783#endif
2784
2785 /*
2786 * Work the poll and async notification queues on OSes that implement that.
2787 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2788 */
2789 if (fMousePositionChanged)
2790 {
2791 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2792 VBoxGuestNativeISRMousePollEvent(pDevExt);
2793#ifndef RT_OS_WINDOWS
2794 if (MouseNotifyCallback.pfnNotify)
2795 MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
2796#endif
2797 }
2798
2799 ASMAtomicDecU32(&pDevExt->cISR);
2800 Assert(rc == 0);
2801 return fOurIrq;
2802}
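/*
 * Editor's note: illustrative, editor-added sketch (excluded from the build)
 * of how a native interrupt handler is expected to use VBoxGuestCommonISR
 * above.  The handler name is a placeholder.  On ports built with
 * VBOXGUEST_USE_DEFERRED_WAKE_UP the ISR only queues the waiters, and the
 * actual wake-ups happen later from a suitable context (a DPC on Windows)
 * via VBoxGuestWaitDoWakeUps, as noted in the ISR itself.
 */
#if 0 /* illustrative sketch only */
static bool exampleNativeInterruptHandler(PVBOXGUESTDEVEXT pDevExt)
{
    bool fOurIrq = VBoxGuestCommonISR(pDevExt);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* Shown inline here for illustration; a real port defers this call to a
       context where waking up the blocked threads is allowed. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif
    return fOurIrq;
}
#endif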
2803