VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@44130

Last change on this file since 44130 was 44130, checked in by vboxsync, 12 years ago

GA/Display: Support for dynamic configuration (position and enable/disable) of the virtual screen for Linux guest.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 99.7 KB
1/* $Id: VBoxGuest.cpp 44130 2012-12-14 10:27:28Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*******************************************************************************
29* Header Files *
30*******************************************************************************/
31#define LOG_GROUP LOG_GROUP_DEFAULT
32#include "VBoxGuestInternal.h"
33#include "VBoxGuest2.h"
34#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
35#include <VBox/log.h>
36#include <iprt/mem.h>
37#include <iprt/time.h>
38#include <iprt/memobj.h>
39#include <iprt/asm.h>
40#include <iprt/asm-amd64-x86.h>
41#include <iprt/string.h>
42#include <iprt/process.h>
43#include <iprt/assert.h>
44#include <iprt/param.h>
45#ifdef VBOX_WITH_HGCM
46# include <iprt/thread.h>
47#endif
48#include "version-generated.h"
49#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
50# include "revision-generated.h"
51#endif
52#ifdef RT_OS_WINDOWS
53# ifndef CTL_CODE
54# include <Windows.h>
55# endif
56#endif
57#if defined(RT_OS_SOLARIS)
58# include <iprt/rand.h>
59#endif
60
61
62/*******************************************************************************
63* Internal Functions *
64*******************************************************************************/
65#ifdef VBOX_WITH_HGCM
66static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
67#endif
68#ifdef DEBUG
69static void testSetMouseStatus(void);
70#endif
71static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
72
73#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
74int VBoxGuestCommonIOCtl_DPC(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
75 void *pvData, size_t cbData, size_t *pcbDataReturned);
76#endif /* VBOX_WITH_DPC_LATENCY_CHECKER */
77
78/*******************************************************************************
79* Global Variables *
80*******************************************************************************/
81static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
82
83#if defined(RT_OS_SOLARIS)
84/**
85 * Drag in the rest of IPRT since we share it with the
86 * rest of the kernel modules on Solaris.
87 */
88PFNRT g_apfnVBoxGuestIPRTDeps[] =
89{
90 /* VirtioNet */
91 (PFNRT)RTRandBytes,
92 /* RTSemMutex* */
93 (PFNRT)RTSemMutexCreate,
94 (PFNRT)RTSemMutexDestroy,
95 (PFNRT)RTSemMutexRequest,
96 (PFNRT)RTSemMutexRequestNoResume,
97 (PFNRT)RTSemMutexRequestDebug,
98 (PFNRT)RTSemMutexRequestNoResumeDebug,
99 (PFNRT)RTSemMutexRelease,
100 (PFNRT)RTSemMutexIsOwned,
101 NULL
102};
103#endif /* RT_OS_SOLARIS */
104
105
106/**
107 * Reserves memory in which the VMM can relocate any guest mappings
108 * that are floating around.
109 *
110 * This operation is a little bit tricky since the VMM might not accept
111 * just any address because of address clashes between the three contexts
112 * it operates in, so use a small stack to perform this operation.
113 *
114 * @returns VBox status code (ignored).
115 * @param pDevExt The device extension.
116 */
117static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
118{
119 /*
120 * Query the required space.
121 */
122 VMMDevReqHypervisorInfo *pReq;
123 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
124 if (RT_FAILURE(rc))
125 return rc;
126 pReq->hypervisorStart = 0;
127 pReq->hypervisorSize = 0;
128 rc = VbglGRPerform(&pReq->header);
129 if (RT_FAILURE(rc)) /* this shouldn't happen! */
130 {
131 VbglGRFree(&pReq->header);
132 return rc;
133 }
134
135 /*
136 * The VMM will report back if there is nothing it wants to map, like for
137 * instance in VT-x and AMD-V mode.
138 */
139 if (pReq->hypervisorSize == 0)
140 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
141 else
142 {
143 /*
144 * We have to try several times since the host can be picky
145 * about certain addresses.
146 */
147 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
148 uint32_t cbHypervisor = pReq->hypervisorSize;
149 RTR0MEMOBJ ahTries[5];
150 uint32_t iTry;
151 bool fBitched = false;
152 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
153 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
154 {
155 /*
156 * Reserve space, or if that isn't supported, create an object for
157 * some fictive physical memory and map that into kernel space.
158 *
159 * To make the code a bit uglier, most systems cannot help with
160 * 4MB alignment, so we have to deal with that in addition to
161 * having two ways of getting the memory.
162 */
163 uint32_t uAlignment = _4M;
164 RTR0MEMOBJ hObj;
165 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
166 if (rc == VERR_NOT_SUPPORTED)
167 {
168 uAlignment = PAGE_SIZE;
169 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
170 }
171 /*
172 * If both RTR0MemObjReserveKernel calls above failed because reservation is either
173 * not supported or not implemented at all on the current platform, fall back to a
174 * fictive physical memory object and map that into the virtual kernel space.
175 */
176 if (rc == VERR_NOT_SUPPORTED)
177 {
178 if (hFictive == NIL_RTR0MEMOBJ)
179 {
180 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
181 if (RT_FAILURE(rc))
182 break;
183 hFictive = hObj;
184 }
185 uAlignment = _4M;
186 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
187 if (rc == VERR_NOT_SUPPORTED)
188 {
189 uAlignment = PAGE_SIZE;
190 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
191 }
192 }
193 if (RT_FAILURE(rc))
194 {
195 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
196 rc, cbHypervisor, uAlignment, iTry));
197 fBitched = true;
198 break;
199 }
200
201 /*
202 * Try to set it.
203 */
204 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
205 pReq->header.rc = VERR_INTERNAL_ERROR;
206 pReq->hypervisorSize = cbHypervisor;
207 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
208 if ( uAlignment == PAGE_SIZE
209 && pReq->hypervisorStart & (_4M - 1))
210 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
211 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
212
213 rc = VbglGRPerform(&pReq->header);
214 if (RT_SUCCESS(rc))
215 {
216 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
217 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
218 RTR0MemObjAddress(pDevExt->hGuestMappings),
219 RTR0MemObjSize(pDevExt->hGuestMappings),
220 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
221 break;
222 }
223 ahTries[iTry] = hObj;
224 }
225
226 /*
227 * Cleanup failed attempts.
228 */
229 while (iTry-- > 0)
230 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
231 if ( RT_FAILURE(rc)
232 && hFictive != NIL_RTR0PTR)
233 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
234 if (RT_FAILURE(rc) && !fBitched)
235 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
236 }
237 VbglGRFree(&pReq->header);
238
239 /*
240 * We ignore failed attempts for now.
241 */
242 return VINF_SUCCESS;
243}
244
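Editorial aside (not part of the original file): the PAGE_SIZE-aligned fallback above deliberately over-reserves by _4M so that rounding the start address up to the next 4MB boundary still leaves the whole hypervisor area inside the reservation. A small worked sketch with made-up numbers:

/* Editorial sketch, not from VBoxGuest.cpp: the alignment arithmetic used above. */
static void vboxGuestExampleAlignmentMath(void)
{
    uint32_t const cbHypervisor = 0x00280000;                            /* hypothetical 2.5MB request */
    uint32_t const cbReserved   = RT_ALIGN_32(cbHypervisor, _4M) + _4M;  /* what the fallback reserves */
    uint32_t const uStart       = 0x01234000;                            /* page aligned, not 4MB aligned */
    uint32_t const uFixed       = RT_ALIGN_32(uStart, _4M);              /* the address reported to the host */
    Assert(uFixed - uStart + RT_ALIGN_32(cbHypervisor, _4M) <= cbReserved);
    NOREF(cbReserved); NOREF(uFixed);
}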
245
246/**
247 * Undo what vboxGuestInitFixateGuestMappings did.
248 *
249 * @param pDevExt The device extension.
250 */
251static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
252{
253 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
254 {
255 /*
256 * Tell the host that we're going to free the memory we reserved for
257 * it, then free it up. (Leak the memory if anything goes wrong here.)
258 */
259 VMMDevReqHypervisorInfo *pReq;
260 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
261 if (RT_SUCCESS(rc))
262 {
263 pReq->hypervisorStart = 0;
264 pReq->hypervisorSize = 0;
265 rc = VbglGRPerform(&pReq->header);
266 VbglGRFree(&pReq->header);
267 }
268 if (RT_SUCCESS(rc))
269 {
270 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
271 AssertRC(rc);
272 }
273 else
274 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
275
276 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
277 }
278}
279
280
281/**
282 * Sets the interrupt filter mask during initialization and termination.
283 *
284 * This will ASSUME that we're the ones in charge of the mask, so
285 * we'll simply clear all bits we don't set.
286 *
287 * @returns VBox status code (ignored).
288 * @param pDevExt The device extension.
289 * @param fMask The new mask.
290 */
291static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
292{
293 VMMDevCtlGuestFilterMask *pReq;
294 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
295 if (RT_SUCCESS(rc))
296 {
297 pReq->u32OrMask = fMask;
298 pReq->u32NotMask = ~fMask;
299 rc = VbglGRPerform(&pReq->header);
300 if (RT_FAILURE(rc))
301 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
302 VbglGRFree(&pReq->header);
303 }
304 return rc;
305}
306
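The function above is the smallest instance of the request pattern used throughout this file: allocate a request on the physical heap, fill it in, hand it to the host, free it. The following standalone sketch is an editorial addition (not from the original file; the choice of event bit is just an example) that spells the sequence out once:

/* Editorial sketch of the canonical VbglGR alloc/fill/perform/free sequence. */
static int vboxGuestExampleVbglRequest(void)
{
    VMMDevCtlGuestFilterMask *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    {
        pReq->u32OrMask  = VMMDEV_EVENT_MOUSE_POSITION_CHANGED;  /* bits to switch on (example value) */
        pReq->u32NotMask = 0;                                     /* bits to switch off (none here) */
        rc = VbglGRPerform(&pReq->header);                        /* hands the physical address to the VMMDev */
        VbglGRFree(&pReq->header);                                /* always release the phys heap block */
    }
    return rc;
}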
307
308/**
309 * Inflate the balloon by one chunk represented by an R0 memory object.
310 *
311 * The caller owns the balloon mutex.
312 *
313 * @returns IPRT status code.
314 * @param pMemObj Pointer to the R0 memory object.
315 * @param pReq The pre-allocated request for performing the VMMDev call.
316 */
317static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
318{
319 uint32_t iPage;
320 int rc;
321
322 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
323 {
324 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
325 pReq->aPhysPage[iPage] = phys;
326 }
327
328 pReq->fInflate = true;
329 pReq->header.size = cbChangeMemBalloonReq;
330 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
331
332 rc = VbglGRPerform(&pReq->header);
333 if (RT_FAILURE(rc))
334 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
335 return rc;
336}
337
338
339/**
340 * Deflate the balloon by one chunk - inform the host and free the memory object.
341 *
342 * The caller owns the balloon mutex.
343 *
344 * @returns IPRT status code.
345 * @param pMemObj Pointer to the R0 memory object.
346 * The memory object will be freed afterwards.
347 * @param pReq The pre-allocated request for performing the VMMDev call.
348 */
349static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
350{
351 uint32_t iPage;
352 int rc;
353
354 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
355 {
356 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
357 pReq->aPhysPage[iPage] = phys;
358 }
359
360 pReq->fInflate = false;
361 pReq->header.size = cbChangeMemBalloonReq;
362 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
363
364 rc = VbglGRPerform(&pReq->header);
365 if (RT_FAILURE(rc))
366 {
367 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
368 return rc;
369 }
370
371 rc = RTR0MemObjFree(*pMemObj, true);
372 if (RT_FAILURE(rc))
373 {
374 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
375 return rc;
376 }
377
378 *pMemObj = NIL_RTR0MEMOBJ;
379 return VINF_SUCCESS;
380}
381
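For orientation, an editorial sketch (not in the original file) of the balloon sizing: one chunk is VMMDEV_MEMORY_BALLOON_CHUNK_SIZE bytes (1MB, as the sizing comments in this file state), so assuming the usual 4KB pages a single change request carries VMMDEV_MEMORY_BALLOON_CHUNK_PAGES = 256 physical page addresses, which is exactly what cbChangeMemBalloonReq accounts for.

/* Editorial sketch: chunk size vs. page count vs. request size (assumes 4KB pages). */
static void vboxGuestExampleBalloonMath(void)
{
    size_t const cPages = VMMDEV_MEMORY_BALLOON_CHUNK_SIZE / PAGE_SIZE;  /* 256 with 4KB pages */
    size_t const cbReq  = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
    Assert(cPages == VMMDEV_MEMORY_BALLOON_CHUNK_PAGES);
    Assert(cbReq  == cbChangeMemBalloonReq);                             /* the global defined at the top */
    NOREF(cPages); NOREF(cbReq);
}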
382
383/**
384 * Inflate/deflate the memory balloon and notify the host.
385 *
386 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
387 * the mutex.
388 *
389 * @returns VBox status code.
390 * @param pDevExt The device extension.
391 * @param pSession The session.
392 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
393 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
394 * (VINF_SUCCESS if set).
395 */
396static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
397{
398 int rc = VINF_SUCCESS;
399
400 if (pDevExt->MemBalloon.fUseKernelAPI)
401 {
402 VMMDevChangeMemBalloon *pReq;
403 uint32_t i;
404
405 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
406 {
407 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
408 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
409 return VERR_INVALID_PARAMETER;
410 }
411
412 if (cBalloonChunks == pDevExt->MemBalloon.cChunks)
413 return VINF_SUCCESS; /* nothing to do */
414
415 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
416 && !pDevExt->MemBalloon.paMemObj)
417 {
418 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
419 if (!pDevExt->MemBalloon.paMemObj)
420 {
421 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
422 return VERR_NO_MEMORY;
423 }
424 }
425
426 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
427 if (RT_FAILURE(rc))
428 return rc;
429
430 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
431 {
432 /* inflate */
433 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
434 {
435 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
436 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
437 if (RT_FAILURE(rc))
438 {
439 if (rc == VERR_NOT_SUPPORTED)
440 {
441 /* not supported -- fall back to the R3-allocated memory. */
442 rc = VINF_SUCCESS;
443 pDevExt->MemBalloon.fUseKernelAPI = false;
444 Assert(pDevExt->MemBalloon.cChunks == 0);
445 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
446 }
447 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
448 * cannot allocate more memory => don't try further, just stop here */
449 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
450 break;
451 }
452
453 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
454 if (RT_FAILURE(rc))
455 {
456 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
457 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
458 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
459 break;
460 }
461 pDevExt->MemBalloon.cChunks++;
462 }
463 }
464 else
465 {
466 /* deflate */
467 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
468 {
469 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
470 if (RT_FAILURE(rc))
471 {
472 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
473 break;
474 }
475 pDevExt->MemBalloon.cChunks--;
476 }
477 }
478
479 VbglGRFree(&pReq->header);
480 }
481
482 /*
483 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
484 * the balloon changes via the other API.
485 */
486 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
487
488 return rc;
489}
490
491
492/**
493 * Helper to reinit the VBoxVMM communication after hibernation.
494 *
495 * @returns VBox status code.
496 * @param pDevExt The device extension.
497 * @param enmOSType The OS type.
498 */
499int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
500{
501 int rc = VBoxGuestReportGuestInfo(enmOSType);
502 if (RT_SUCCESS(rc))
503 {
504 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
505 if (RT_FAILURE(rc))
506 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
507 }
508 else
509 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
510 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
511 return rc;
512}
513
514
515/**
516 * Inflate/deflate the balloon by one chunk.
517 *
518 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
519 *
520 * @returns VBox status code.
521 * @param pDevExt The device extension.
522 * @param pSession The session.
523 * @param u64ChunkAddr The address of the chunk to add to / remove from the
524 * balloon.
525 * @param fInflate Inflate if true, deflate if false.
526 */
527static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
528 uint64_t u64ChunkAddr, bool fInflate)
529{
530 VMMDevChangeMemBalloon *pReq;
531 int rc = VINF_SUCCESS;
532 uint32_t i;
533 PRTR0MEMOBJ pMemObj = NULL;
534
535 if (fInflate)
536 {
537 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
538 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
539 {
540 LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
541 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
542 return VERR_INVALID_PARAMETER;
543 }
544
545 if (!pDevExt->MemBalloon.paMemObj)
546 {
547 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
548 if (!pDevExt->MemBalloon.paMemObj)
549 {
550 LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
551 return VERR_NO_MEMORY;
552 }
553 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
554 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
555 }
556 }
557 else
558 {
559 if (pDevExt->MemBalloon.cChunks == 0)
560 {
561 AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
562 return VERR_INVALID_PARAMETER;
563 }
564 }
565
566 /*
567 * Enumerate all memory objects and check if the object is already registered.
568 */
569 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
570 {
571 if ( fInflate
572 && !pMemObj
573 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
574 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
575 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
576 {
577 if (fInflate)
578 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
579 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
580 break;
581 }
582 }
583 if (!pMemObj)
584 {
585 if (fInflate)
586 {
587 /* no free object pointer found -- should not happen */
588 return VERR_NO_MEMORY;
589 }
590
591 /* cannot free this memory as it wasn't provided before */
592 return VERR_NOT_FOUND;
593 }
594
595 /*
596 * Try to inflate / deflate the balloon as requested.
597 */
598 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
599 if (RT_FAILURE(rc))
600 return rc;
601
602 if (fInflate)
603 {
604 rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
605 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
606 if (RT_SUCCESS(rc))
607 {
608 rc = vboxGuestBalloonInflate(pMemObj, pReq);
609 if (RT_SUCCESS(rc))
610 pDevExt->MemBalloon.cChunks++;
611 else
612 {
613 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
614 RTR0MemObjFree(*pMemObj, true);
615 *pMemObj = NIL_RTR0MEMOBJ;
616 }
617 }
618 }
619 else
620 {
621 rc = vboxGuestBalloonDeflate(pMemObj, pReq);
622 if (RT_SUCCESS(rc))
623 pDevExt->MemBalloon.cChunks--;
624 else
625 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
626 }
627
628 VbglGRFree(&pReq->header);
629 return rc;
630}
631
632
633/**
634 * Cleanup the memory balloon of a session.
635 *
636 * Will request the balloon mutex, so it must be valid and the caller must not
637 * own it already.
638 *
639 * @param pDevExt The device extension.
640 * @param pSession The session. Can be NULL at unload.
641 */
642static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
643{
644 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
645 if ( pDevExt->MemBalloon.pOwner == pSession
646 || pSession == NULL /*unload*/)
647 {
648 if (pDevExt->MemBalloon.paMemObj)
649 {
650 VMMDevChangeMemBalloon *pReq;
651 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
652 if (RT_SUCCESS(rc))
653 {
654 uint32_t i;
655 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
656 {
657 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
658 if (RT_FAILURE(rc))
659 {
660 LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
661 rc, pDevExt->MemBalloon.cChunks));
662 break;
663 }
664 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
665 pDevExt->MemBalloon.cChunks--;
666 }
667 VbglGRFree(&pReq->header);
668 }
669 else
670 LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
671 rc, pDevExt->MemBalloon.cChunks));
672 RTMemFree(pDevExt->MemBalloon.paMemObj);
673 pDevExt->MemBalloon.paMemObj = NULL;
674 }
675
676 pDevExt->MemBalloon.pOwner = NULL;
677 }
678 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
679}
680
681
682/**
683 * Initializes the VBoxGuest device extension when the
684 * device driver is loaded.
685 *
686 * The native code locates the VMMDev on the PCI bus and retrieves
687 * the MMIO and I/O port ranges; this function will take care of
688 * mapping the MMIO memory (if present). Upon successful return
689 * the native code should set up the interrupt handler.
690 *
691 * @returns VBox status code.
692 *
693 * @param pDevExt The device extension. Allocated by the native code.
694 * @param IOPortBase The base of the I/O port range.
695 * @param pvMMIOBase The base of the MMIO memory mapping.
696 * This is optional, pass NULL if not present.
697 * @param cbMMIO The size of the MMIO memory mapping.
698 * This is optional, pass 0 if not present.
699 * @param enmOSType The guest OS type to report to the VMMDev.
700 * @param fFixedEvents Events that will be enabled upon init and no client
701 * will ever be allowed to mask.
702 */
703int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
704 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
705{
706 int rc, rc2;
707 unsigned i;
708
709 /*
710 * Adjust fFixedEvents.
711 */
712#ifdef VBOX_WITH_HGCM
713 fFixedEvents |= VMMDEV_EVENT_HGCM;
714#endif
715
716 /*
717 * Initialize the data.
718 */
719 pDevExt->IOPortBase = IOPortBase;
720 pDevExt->pVMMDevMemory = NULL;
721 pDevExt->fFixedEvents = fFixedEvents;
722 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
723 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
724 pDevExt->pIrqAckEvents = NULL;
725 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
726 RTListInit(&pDevExt->WaitList);
727#ifdef VBOX_WITH_HGCM
728 RTListInit(&pDevExt->HGCMWaitList);
729#endif
730#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
731 RTListInit(&pDevExt->WakeUpList);
732#endif
733 RTListInit(&pDevExt->WokenUpList);
734 RTListInit(&pDevExt->FreeList);
735#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
736 pDevExt->fVRDPEnabled = false;
737#endif
738 pDevExt->fLoggingEnabled = false;
739 pDevExt->f32PendingEvents = 0;
740 pDevExt->u32MousePosChangedSeq = 0;
741 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
742 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
743 pDevExt->MemBalloon.cChunks = 0;
744 pDevExt->MemBalloon.cMaxChunks = 0;
745 pDevExt->MemBalloon.fUseKernelAPI = true;
746 pDevExt->MemBalloon.paMemObj = NULL;
747 pDevExt->MemBalloon.pOwner = NULL;
748 for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
749 pDevExt->acMouseFeatureUsage[i] = 0;
750 pDevExt->fMouseStatus = 0;
751 pDevExt->MouseNotifyCallback.pfnNotify = NULL;
752 pDevExt->MouseNotifyCallback.pvUser = NULL;
753 pDevExt->cISR = 0;
754
755 /*
756 * If there is an MMIO region validate the version and size.
757 */
758 if (pvMMIOBase)
759 {
760 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
761 Assert(cbMMIO);
762 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
763 && pVMMDev->u32Size >= 32
764 && pVMMDev->u32Size <= cbMMIO)
765 {
766 pDevExt->pVMMDevMemory = pVMMDev;
767 Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
768 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
769 }
770 else /* try live without it. */
771 LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
772 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
773 }
774
775 /*
776 * Create the wait and session spinlocks as well as the ballooning mutex.
777 */
778 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
779 if (RT_SUCCESS(rc))
780 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
781 if (RT_FAILURE(rc))
782 {
783 LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
784 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
785 RTSpinlockDestroy(pDevExt->EventSpinlock);
786 return rc;
787 }
788
789 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
790 if (RT_FAILURE(rc))
791 {
792 LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
793 RTSpinlockDestroy(pDevExt->SessionSpinlock);
794 RTSpinlockDestroy(pDevExt->EventSpinlock);
795 return rc;
796 }
797
798 /*
799 * Initialize the guest library and report the guest info back to VMMDev,
800 * set the interrupt control filter mask, and fixate the guest mappings
801 * made by the VMM.
802 */
803 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
804 if (RT_SUCCESS(rc))
805 {
806 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
807 if (RT_SUCCESS(rc))
808 {
809 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
810 Assert(pDevExt->PhysIrqAckEvents != 0);
811
812 rc = VBoxGuestReportGuestInfo(enmOSType);
813 if (RT_SUCCESS(rc))
814 {
815 rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
816 if (RT_SUCCESS(rc))
817 {
818 /*
819 * Disable guest graphics capability by default. The guest specific
820 * graphics driver will re-enable this when it is necessary.
821 */
822 rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
823 if (RT_SUCCESS(rc))
824 {
825 vboxGuestInitFixateGuestMappings(pDevExt);
826
827#ifdef DEBUG
828 testSetMouseStatus(); /* Other tests? */
829#endif
830
831 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
832 if (RT_FAILURE(rc))
833 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
834
835 Log(("VBoxGuestInitDevExt: returns success\n"));
836 return VINF_SUCCESS;
837 }
838
839 LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
840 }
841 else
842 LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
843 }
844 else
845 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
846 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
847 }
848 else
849 LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));
850
851 VbglTerminate();
852 }
853 else
854 LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
855
856 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
857 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
858 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
859 return rc; /* (failed) */
860}
861
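A rough editorial sketch (not part of this file) of the load-time sequence the comment above prescribes for a native port; vboxNativeFindVMMDevPci and vboxNativeInstallIsr are made-up placeholders for the OS-specific PCI and interrupt code, and only VBoxGuestInitDevExt/VBoxGuestDeleteDevExt are real.

/* Hypothetical native glue around VBoxGuestInitDevExt. */
int vboxNativeFindVMMDevPci(uint16_t *pIOPortBase, void **ppvMMIOBase, uint32_t *pcbMMIO); /* placeholder */
int vboxNativeInstallIsr(PVBOXGUESTDEVEXT pDevExt);                                        /* placeholder */

static int vboxNativeLoadSketch(PVBOXGUESTDEVEXT pDevExt)
{
    uint16_t IOPortBase = 0;
    void    *pvMMIOBase = NULL;
    uint32_t cbMMIO     = 0;
    int rc = vboxNativeFindVMMDevPci(&IOPortBase, &pvMMIOBase, &cbMMIO);   /* locate the PCI device */
    if (RT_SUCCESS(rc))
    {
        rc = VBoxGuestInitDevExt(pDevExt, IOPortBase, pvMMIOBase, cbMMIO,
                                 VBOXOSTYPE_Linux, 0 /* fFixedEvents */);
        if (RT_SUCCESS(rc))
        {
            rc = vboxNativeInstallIsr(pDevExt);    /* only after the extension is fully initialized */
            if (RT_FAILURE(rc))
                VBoxGuestDeleteDevExt(pDevExt);
        }
    }
    return rc;
}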
862
863/**
864 * Deletes all the items in a wait chain.
865 * @param pList The head of the chain.
866 */
867static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
868{
869 while (!RTListIsEmpty(pList))
870 {
871 int rc2;
872 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
873 RTListNodeRemove(&pWait->ListNode);
874
875 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
876 pWait->Event = NIL_RTSEMEVENTMULTI;
877 pWait->pSession = NULL;
878 RTMemFree(pWait);
879 }
880}
881
882
883/**
884 * Destroys the VBoxGuest device extension.
885 *
886 * The native code should call this before the driver is unloaded,
887 * but don't call this on shutdown.
888 *
889 * @param pDevExt The device extension.
890 */
891void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
892{
893 int rc2;
894 Log(("VBoxGuestDeleteDevExt:\n"));
895 Log(("VBoxGuest: The additions driver is terminating.\n"));
896
897 /*
898 * Clean up the bits that involves the host first.
899 */
900 vboxGuestTermUnfixGuestMappings(pDevExt);
901 VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
902 vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
903 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
904
905 /*
906 * Cleanup all the other resources.
907 */
908 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
909 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
910 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
911
912 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
913#ifdef VBOX_WITH_HGCM
914 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
915#endif
916#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
917 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
918#endif
919 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
920 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
921
922 VbglTerminate();
923
924 pDevExt->pVMMDevMemory = NULL;
925
926 pDevExt->IOPortBase = 0;
927 pDevExt->pIrqAckEvents = NULL;
928}
929
930
931/**
932 * Creates a VBoxGuest user session.
933 *
934 * The native code calls this when a ring-3 client opens the device.
935 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
936 *
937 * @returns VBox status code.
938 * @param pDevExt The device extension.
939 * @param ppSession Where to store the session on success.
940 */
941int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
942{
943 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
944 if (RT_UNLIKELY(!pSession))
945 {
946 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
947 return VERR_NO_MEMORY;
948 }
949
950 pSession->Process = RTProcSelf();
951 pSession->R0Process = RTR0ProcHandleSelf();
952 pSession->pDevExt = pDevExt;
953
954 *ppSession = pSession;
955 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
956 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
957 return VINF_SUCCESS;
958}
959
960
961/**
962 * Creates a VBoxGuest kernel session.
963 *
964 * The native code calls this when a ring-0 client connects to the device.
965 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
966 *
967 * @returns VBox status code.
968 * @param pDevExt The device extension.
969 * @param ppSession Where to store the session on success.
970 */
971int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
972{
973 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
974 if (RT_UNLIKELY(!pSession))
975 {
976 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
977 return VERR_NO_MEMORY;
978 }
979
980 pSession->Process = NIL_RTPROCESS;
981 pSession->R0Process = NIL_RTR0PROCESS;
982 pSession->pDevExt = pDevExt;
983
984 *ppSession = pSession;
985 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
986 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
987 return VINF_SUCCESS;
988}
989
990
991
992/**
993 * Closes a VBoxGuest session.
994 *
995 * @param pDevExt The device extension.
996 * @param pSession The session to close (and free).
997 */
998void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
999{
1000 unsigned i; NOREF(i);
1001 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
1002 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
1003
1004#ifdef VBOX_WITH_HGCM
1005 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1006 if (pSession->aHGCMClientIds[i])
1007 {
1008 VBoxGuestHGCMDisconnectInfo Info;
1009 Info.result = 0;
1010 Info.u32ClientID = pSession->aHGCMClientIds[i];
1011 pSession->aHGCMClientIds[i] = 0;
1012 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
1013 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1014 }
1015#endif
1016
1017 pSession->pDevExt = NULL;
1018 pSession->Process = NIL_RTPROCESS;
1019 pSession->R0Process = NIL_RTR0PROCESS;
1020 vboxGuestCloseMemBalloon(pDevExt, pSession);
1021 /* Reset any mouse status flags which the session may have set. */
1022 VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
1023 RTMemFree(pSession);
1024}
1025
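As an editorial sketch (not from the original file), this is roughly how a native open/close pair drives the session API above; the two wrapper names are placeholders for the OS-specific entry points.

/* Hypothetical native open/close glue around the session functions above. */
static int vboxNativeOpenSketch(PVBOXGUESTDEVEXT pDevExt, bool fKernelClient, PVBOXGUESTSESSION *ppSession)
{
    /* Ring-0 clients get a kernel session, ring-3 openers a user session. */
    if (fKernelClient)
        return VBoxGuestCreateKernelSession(pDevExt, ppSession);
    return VBoxGuestCreateUserSession(pDevExt, ppSession);
}

static void vboxNativeCloseSketch(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Disconnects any HGCM clients, releases the balloon and mouse status, frees the session. */
    VBoxGuestCloseSession(pDevExt, pSession);
}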
1026
1027/**
1028 * Allocates a wait-for-event entry.
1029 *
1030 * @returns The wait-for-event entry.
1031 * @param pDevExt The device extension.
1032 * @param pSession The session that's allocating this. Can be NULL.
1033 */
1034static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1035{
1036 /*
1037 * Allocate it one way or the other.
1038 */
1039 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1040 if (pWait)
1041 {
1042 RTSpinlockAcquire(pDevExt->EventSpinlock);
1043
1044 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1045 if (pWait)
1046 RTListNodeRemove(&pWait->ListNode);
1047
1048 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1049 }
1050 if (!pWait)
1051 {
1052 static unsigned s_cErrors = 0;
1053 int rc;
1054
1055 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1056 if (!pWait)
1057 {
1058 if (s_cErrors++ < 32)
1059 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1060 return NULL;
1061 }
1062
1063 rc = RTSemEventMultiCreate(&pWait->Event);
1064 if (RT_FAILURE(rc))
1065 {
1066 if (s_cErrors++ < 32)
1067 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1068 RTMemFree(pWait);
1069 return NULL;
1070 }
1071
1072 pWait->ListNode.pNext = NULL;
1073 pWait->ListNode.pPrev = NULL;
1074 }
1075
1076 /*
1077 * Zero members just as a precaution.
1078 */
1079 pWait->fReqEvents = 0;
1080 pWait->fResEvents = 0;
1081#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1082 pWait->fPendingWakeUp = false;
1083 pWait->fFreeMe = false;
1084#endif
1085 pWait->pSession = pSession;
1086#ifdef VBOX_WITH_HGCM
1087 pWait->pHGCMReq = NULL;
1088#endif
1089 RTSemEventMultiReset(pWait->Event);
1090 return pWait;
1091}
1092
1093
1094/**
1095 * Frees the wait-for-event entry.
1096 *
1097 * The caller must own the wait spinlock!
1098 * The entry must be in a list!
1099 *
1100 * @param pDevExt The device extension.
1101 * @param pWait The wait-for-event entry to free.
1102 */
1103static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1104{
1105 pWait->fReqEvents = 0;
1106 pWait->fResEvents = 0;
1107#ifdef VBOX_WITH_HGCM
1108 pWait->pHGCMReq = NULL;
1109#endif
1110#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1111 Assert(!pWait->fFreeMe);
1112 if (pWait->fPendingWakeUp)
1113 pWait->fFreeMe = true;
1114 else
1115#endif
1116 {
1117 RTListNodeRemove(&pWait->ListNode);
1118 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1119 }
1120}
1121
1122
1123/**
1124 * Frees the wait-for-event entry.
1125 *
1126 * @param pDevExt The device extension.
1127 * @param pWait The wait-for-event entry to free.
1128 */
1129static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1130{
1131 RTSpinlockAcquire(pDevExt->EventSpinlock);
1132 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1133 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1134}
1135
1136
1137#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1138/**
1139 * Processes the wake-up list.
1140 *
1141 * All entries in the wake-up list get signalled and moved to the woken-up
1142 * list.
1143 *
1144 * @param pDevExt The device extension.
1145 */
1146void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1147{
1148 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1149 {
1150 RTSpinlockAcquire(pDevExt->EventSpinlock);
1151 for (;;)
1152 {
1153 int rc;
1154 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1155 if (!pWait)
1156 break;
1157 pWait->fPendingWakeUp = true;
1158 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1159
1160 rc = RTSemEventMultiSignal(pWait->Event);
1161 AssertRC(rc);
1162
1163 RTSpinlockAcquire(pDevExt->EventSpinlock);
1164 pWait->fPendingWakeUp = false;
1165 if (!pWait->fFreeMe)
1166 {
1167 RTListNodeRemove(&pWait->ListNode);
1168 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1169 }
1170 else
1171 {
1172 pWait->fFreeMe = false;
1173 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1174 }
1175 }
1176 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1177 }
1178}
1179#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1180
1181
1182/**
1183 * Modifies the guest capabilities.
1184 *
1185 * Should be called during driver init and termination.
1186 *
1187 * @returns VBox status code.
1188 * @param fOr The Or mask (what to enable).
1189 * @param fNot The Not mask (what to disable).
1190 */
1191int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1192{
1193 VMMDevReqGuestCapabilities2 *pReq;
1194 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1195 if (RT_FAILURE(rc))
1196 {
1197 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1198 sizeof(*pReq), sizeof(*pReq), rc));
1199 return rc;
1200 }
1201
1202 pReq->u32OrMask = fOr;
1203 pReq->u32NotMask = fNot;
1204
1205 rc = VbglGRPerform(&pReq->header);
1206 if (RT_FAILURE(rc))
1207 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1208
1209 VbglGRFree(&pReq->header);
1210 return rc;
1211}
1212
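A small editorial usage sketch (not part of the original file) of the Or/Not mask convention used by VBoxGuestSetGuestCapabilities, and by vboxGuestSetFilterMask earlier: bits set in fOr are enabled, bits set in fNot are disabled, everything else is left untouched.

/* Editorial sketch: toggling the graphics capability, mirroring what
 * VBoxGuestInitDevExt and the guest graphics driver do. */
static int vboxGuestExampleToggleGraphicsCap(bool fEnable)
{
    if (fEnable)
        return VBoxGuestSetGuestCapabilities(VMMDEV_GUEST_SUPPORTS_GRAPHICS, 0);
    return VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
}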
1213
1214/**
1215 * Implements the fast (no input or output) type of IOCtls.
1216 *
1217 * This is currently just a placeholder stub inherited from the support driver code.
1218 *
1219 * @returns VBox status code.
1220 * @param iFunction The IOCtl function number.
1221 * @param pDevExt The device extension.
1222 * @param pSession The session.
1223 */
1224int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1225{
1226 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1227
1228 NOREF(iFunction);
1229 NOREF(pDevExt);
1230 NOREF(pSession);
1231 return VERR_NOT_SUPPORTED;
1232}
1233
1234
1235/**
1236 * Return the VMM device port.
1237 *
1238 * @returns IPRT status code.
1239 * @param pDevExt The device extension.
1240 * @param pInfo The request info.
1241 * @param pcbDataReturned (out) contains the number of bytes to return.
1242 */
1243static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1244{
1245 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1246 pInfo->portAddress = pDevExt->IOPortBase;
1247 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1248 if (pcbDataReturned)
1249 *pcbDataReturned = sizeof(*pInfo);
1250 return VINF_SUCCESS;
1251}
1252
1253
1254#ifndef RT_OS_WINDOWS
1255/**
1256 * Set the callback for the kernel mouse handler.
1257 *
1258 * @returns IPRT status code.
1259 * @param pDevExt The device extension.
1260 * @param pNotify The new callback information.
1261 * @note This function takes the session spinlock to update the callback
1262 * information, but the interrupt handler will not do this. To make
1263 * sure that the interrupt handler sees a consistent structure, we
1264 * set the function pointer to NULL before updating the data and only
1265 * set it to the correct value once the data is updated. Since the
1266 * interrupt handler executes atomically this ensures that the data is
1267 * valid if the function pointer is non-NULL.
1268 */
1269int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1270{
1271 Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1272
1273 RTSpinlockAcquire(pDevExt->EventSpinlock);
1274 pDevExt->MouseNotifyCallback = *pNotify;
1275 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1276
1277 /* Make sure no active ISR is referencing the old data - hacky but should be
1278 * effective. */
1279 while (pDevExt->cISR > 0)
1280 ASMNopPause();
1281
1282 return VINF_SUCCESS;
1283}
1284#endif
1285
1286
1287/**
1288 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
1289 *
1290 * The caller enters the spinlock, we leave it.
1291 *
1292 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1293 */
1294DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1295 int iEvent, const uint32_t fReqEvents)
1296{
1297 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1298 if (fMatches)
1299 {
1300 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1301 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1302
1303 pInfo->u32EventFlagsOut = fMatches;
1304 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1305 if (fReqEvents & ~((uint32_t)1 << iEvent))
1306 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1307 else
1308 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1309 return VINF_SUCCESS;
1310 }
1311 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1312 return VERR_TIMEOUT;
1313}
1314
1315
1316static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1317 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
1318{
1319 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
1320 uint32_t fResEvents;
1321 int iEvent;
1322 PVBOXGUESTWAIT pWait;
1323 int rc;
1324
1325 pInfo->u32EventFlagsOut = 0;
1326 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1327 if (pcbDataReturned)
1328 *pcbDataReturned = sizeof(*pInfo);
1329
1330 /*
1331 * Copy and verify the input mask.
1332 */
1333 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
1334 if (RT_UNLIKELY(iEvent < 0))
1335 {
1336 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
1337 return VERR_INVALID_PARAMETER;
1338 }
1339
1340 /*
1341 * Check the condition up front, before doing the wait-for-event allocations.
1342 */
1343 RTSpinlockAcquire(pDevExt->EventSpinlock);
1344 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents);
1345 if (rc == VINF_SUCCESS)
1346 return rc;
1347
1348 if (!pInfo->u32TimeoutIn)
1349 {
1350 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1351 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
1352 return VERR_TIMEOUT;
1353 }
1354
1355 pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
1356 if (!pWait)
1357 return VERR_NO_MEMORY;
1358 pWait->fReqEvents = fReqEvents;
1359
1360 /*
1361 * We've got the wait entry now, re-enter the spinlock and check for the condition.
1362 * If the wait condition is met, return.
1363 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
1364 */
1365 RTSpinlockAcquire(pDevExt->EventSpinlock);
1366 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
1367 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents);
1368 if (rc == VINF_SUCCESS)
1369 {
1370 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1371 return rc;
1372 }
1373
1374 if (fInterruptible)
1375 rc = RTSemEventMultiWaitNoResume(pWait->Event,
1376 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1377 else
1378 rc = RTSemEventMultiWait(pWait->Event,
1379 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1380
1381 /*
1382 * There is one special case here and that's when the semaphore is
1383 * destroyed upon device driver unload. This shouldn't happen of course,
1384 * but in case it does, just get out of here ASAP.
1385 */
1386 if (rc == VERR_SEM_DESTROYED)
1387 return rc;
1388
1389 /*
1390 * Unlink the wait item and dispose of it.
1391 */
1392 RTSpinlockAcquire(pDevExt->EventSpinlock);
1393 fResEvents = pWait->fResEvents;
1394 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1395 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1396
1397 /*
1398 * Now deal with the return code.
1399 */
1400 if ( fResEvents
1401 && fResEvents != UINT32_MAX)
1402 {
1403 pInfo->u32EventFlagsOut = fResEvents;
1404 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1405 if (fReqEvents & ~((uint32_t)1 << iEvent))
1406 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1407 else
1408 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1409 rc = VINF_SUCCESS;
1410 }
1411 else if ( fResEvents == UINT32_MAX
1412 || rc == VERR_INTERRUPTED)
1413 {
1414 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
1415 rc = VERR_INTERRUPTED;
1416 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
1417 }
1418 else if (rc == VERR_TIMEOUT)
1419 {
1420 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1421 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
1422 }
1423 else
1424 {
1425 if (RT_SUCCESS(rc))
1426 {
1427 static unsigned s_cErrors = 0;
1428 if (s_cErrors++ < 32)
1429 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
1430 rc = VERR_INTERNAL_ERROR;
1431 }
1432 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1433 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
1434 }
1435
1436 return rc;
1437}
1438
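For context, an editorial sketch (not from this file) of how a caller would fill the VBoxGuestWaitEventInfo structure that the WAITEVENT handler above consumes; the OS-specific ioctl plumbing is omitted and the event bit is just an example.

/* Editorial sketch: a 5 second wait on the mouse-position-changed event. */
static void vboxGuestExampleFillWaitEventInfo(VBoxGuestWaitEventInfo *pInfo)
{
    pInfo->u32TimeoutIn     = 5000;                                /* milliseconds; UINT32_MAX waits forever */
    pInfo->u32EventMaskIn   = VMMDEV_EVENT_MOUSE_POSITION_CHANGED; /* events to wait for */
    pInfo->u32EventFlagsOut = 0;                                   /* filled in by the driver */
    pInfo->u32Result        = VBOXGUEST_WAITEVENT_ERROR;           /* filled in by the driver */
}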
1439
1440static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1441{
1442 PVBOXGUESTWAIT pWait;
1443 PVBOXGUESTWAIT pSafe;
1444 int rc = 0;
1445
1446 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1447
1448 /*
1449 * Walk the event list and wake up anyone with a matching session.
1450 */
1451 RTSpinlockAcquire(pDevExt->EventSpinlock);
1452 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
1453 {
1454 if (pWait->pSession == pSession)
1455 {
1456 pWait->fResEvents = UINT32_MAX;
1457 RTListNodeRemove(&pWait->ListNode);
1458#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1459 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
1460#else
1461 rc |= RTSemEventMultiSignal(pWait->Event);
1462 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1463#endif
1464 }
1465 }
1466 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1467 Assert(rc == 0);
1468
1469#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1470 VBoxGuestWaitDoWakeUps(pDevExt);
1471#endif
1472
1473 return VINF_SUCCESS;
1474}
1475
1476/**
1477 * Checks if the VMM request is allowed in the context of the given session.
1478 *
1479 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1480 * @param pSession The calling session.
1481 * @param enmType The request type.
1482 * @param pReqHdr The request.
1483 */
1484static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1485 VMMDevRequestHeader const *pReqHdr)
1486{
1487 /*
1488 * Categorize the request being made.
1489 */
1490 /** @todo This needs quite some more work! */
1491 enum
1492 {
1493 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1494 } enmRequired;
1495 switch (enmType)
1496 {
1497 /*
1498 * Deny access to anything we don't know or provide specialized I/O controls for.
1499 */
1500#ifdef VBOX_WITH_HGCM
1501 case VMMDevReq_HGCMConnect:
1502 case VMMDevReq_HGCMDisconnect:
1503# ifdef VBOX_WITH_64_BITS_GUESTS
1504 case VMMDevReq_HGCMCall32:
1505 case VMMDevReq_HGCMCall64:
1506# else
1507 case VMMDevReq_HGCMCall:
1508# endif /* VBOX_WITH_64_BITS_GUESTS */
1509 case VMMDevReq_HGCMCancel:
1510 case VMMDevReq_HGCMCancel2:
1511#endif /* VBOX_WITH_HGCM */
1512 default:
1513 enmRequired = kLevel_NoOne;
1514 break;
1515
1516 /*
1517 * There are a few things only this driver can do (and it doesn't use
1518 * the VMMRequest I/O control route anyway, but whatever).
1519 */
1520 case VMMDevReq_ReportGuestInfo:
1521 case VMMDevReq_ReportGuestInfo2:
1522 case VMMDevReq_GetHypervisorInfo:
1523 case VMMDevReq_SetHypervisorInfo:
1524 case VMMDevReq_RegisterPatchMemory:
1525 case VMMDevReq_DeregisterPatchMemory:
1526 case VMMDevReq_GetMemBalloonChangeRequest:
1527 enmRequired = kLevel_OnlyVBoxGuest;
1528 break;
1529
1530 /*
1531 * Trusted users apps only.
1532 */
1533 case VMMDevReq_QueryCredentials:
1534 case VMMDevReq_ReportCredentialsJudgement:
1535 case VMMDevReq_RegisterSharedModule:
1536 case VMMDevReq_UnregisterSharedModule:
1537 case VMMDevReq_WriteCoreDump:
1538 case VMMDevReq_GetCpuHotPlugRequest:
1539 case VMMDevReq_SetCpuHotPlugStatus:
1540 case VMMDevReq_CheckSharedModules:
1541 case VMMDevReq_GetPageSharingStatus:
1542 case VMMDevReq_DebugIsPageShared:
1543 case VMMDevReq_ReportGuestStats:
1544 case VMMDevReq_GetStatisticsChangeRequest:
1545 case VMMDevReq_ChangeMemBalloon:
1546 enmRequired = kLevel_TrustedUsers;
1547 break;
1548
1549 /*
1550 * Anyone.
1551 */
1552 case VMMDevReq_GetMouseStatus:
1553 case VMMDevReq_SetMouseStatus:
1554 case VMMDevReq_SetPointerShape:
1555 case VMMDevReq_GetHostVersion:
1556 case VMMDevReq_Idle:
1557 case VMMDevReq_GetHostTime:
1558 case VMMDevReq_SetPowerStatus:
1559 case VMMDevReq_AcknowledgeEvents:
1560 case VMMDevReq_CtlGuestFilterMask:
1561 case VMMDevReq_ReportGuestCapabilities:
1562 case VMMDevReq_GetDisplayChangeRequest:
1563 case VMMDevReq_VideoModeSupported:
1564 case VMMDevReq_GetHeightReduction:
1565 case VMMDevReq_GetDisplayChangeRequest2:
1566 case VMMDevReq_SetGuestCapabilities:
1567 case VMMDevReq_VideoModeSupported2:
1568 case VMMDevReq_VideoAccelEnable:
1569 case VMMDevReq_VideoAccelFlush:
1570 case VMMDevReq_VideoSetVisibleRegion:
1571 case VMMDevReq_GetDisplayChangeRequestEx:
1572 case VMMDevReq_GetSeamlessChangeRequest:
1573 case VMMDevReq_GetVRDPChangeRequest:
1574 case VMMDevReq_LogString:
1575 case VMMDevReq_GetSessionId:
1576 enmRequired = kLevel_AllUsers;
1577 break;
1578
1579 /*
1580 * Depends on the request parameters...
1581 */
1582 /** @todo this has to be changed into an I/O control and the facilities
1583 * tracked in the session so they can automatically be failed when the
1584 * session terminates without reporting the new status.
1585 *
1586 * The information presented by IGuest is not reliable without this! */
1587 case VMMDevReq_ReportGuestStatus:
1588 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1589 {
1590 case VBoxGuestFacilityType_All:
1591 case VBoxGuestFacilityType_VBoxGuestDriver:
1592 enmRequired = kLevel_OnlyVBoxGuest;
1593 break;
1594 case VBoxGuestFacilityType_VBoxService:
1595 enmRequired = kLevel_TrustedUsers;
1596 break;
1597 case VBoxGuestFacilityType_VBoxTrayClient:
1598 case VBoxGuestFacilityType_Seamless:
1599 case VBoxGuestFacilityType_Graphics:
1600 default:
1601 enmRequired = kLevel_AllUsers;
1602 break;
1603 }
1604 break;
1605 }
1606
1607 /*
1608 * Check against the session.
1609 */
1610 switch (enmRequired)
1611 {
1612 default:
1613 case kLevel_NoOne:
1614 break;
1615 case kLevel_OnlyVBoxGuest:
1616 case kLevel_OnlyKernel:
1617 if (pSession->R0Process == NIL_RTR0PROCESS)
1618 return VINF_SUCCESS;
1619 break;
1620 case kLevel_TrustedUsers:
1621 case kLevel_AllUsers:
1622 return VINF_SUCCESS;
1623 }
1624
1625 return VERR_PERMISSION_DENIED;
1626}
1627
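Summarizing the two switches above as an editorial sketch (not original text): requests classified kLevel_NoOne are always refused, kLevel_OnlyVBoxGuest and kLevel_OnlyKernel require a kernel session (R0Process == NIL_RTR0PROCESS), and kLevel_TrustedUsers is currently treated the same as kLevel_AllUsers.

/* Editorial sketch of what VBoxGuestCheckIfVMMReqAllowed boils down to today;
 * the integer values follow the declaration order of the local kLevel_* enum. */
static bool vboxGuestExampleIsLevelAllowed(PVBOXGUESTSESSION pSession, int iLevel)
{
    if (iLevel <= 1)                                    /* kLevel_Invalid, kLevel_NoOne */
        return false;
    if (iLevel <= 3)                                    /* kLevel_OnlyVBoxGuest, kLevel_OnlyKernel */
        return pSession->R0Process == NIL_RTR0PROCESS;  /* ring-0 sessions only */
    return true;                                        /* kLevel_TrustedUsers, kLevel_AllUsers */
}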
1628static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1629 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
1630{
1631 int rc;
1632 VMMDevRequestHeader *pReqCopy;
1633
1634 /*
1635 * Validate the header and request size.
1636 */
1637 const VMMDevRequestType enmType = pReqHdr->requestType;
1638 const uint32_t cbReq = pReqHdr->size;
1639 const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
1640
1641 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
1642
1643 if (cbReq < cbMinSize)
1644 {
1645 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
1646 cbReq, cbMinSize, enmType));
1647 return VERR_INVALID_PARAMETER;
1648 }
1649 if (cbReq > cbData)
1650 {
1651 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1652 cbData, cbReq, enmType));
1653 return VERR_INVALID_PARAMETER;
1654 }
1655 rc = VbglGRVerify(pReqHdr, cbData);
1656 if (RT_FAILURE(rc))
1657 {
1658 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
1659 cbData, cbReq, enmType, rc));
1660 return rc;
1661 }
1662
1663 rc = VBoxGuestCheckIfVMMReqAllowed(pSession, enmType, pReqHdr);
1664 if (RT_FAILURE(rc))
1665 {
1666 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
1667 return rc;
1668 }
1669
1670 /*
1671 * Make a copy of the request in the physical memory heap so
1672 * the VBoxGuestLibrary can more easily deal with the request.
1673 * (This is really a waste of time since the OS or the OS specific
1674 * code has already buffered or locked the input/output buffer, but
1675 * it does make things a bit simpler wrt the phys address.)
1676 */
1677 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1678 if (RT_FAILURE(rc))
1679 {
1680 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1681 cbReq, cbReq, rc));
1682 return rc;
1683 }
1684 memcpy(pReqCopy, pReqHdr, cbReq);
1685
1686 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1687 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1688
1689 rc = VbglGRPerform(pReqCopy);
1690 if ( RT_SUCCESS(rc)
1691 && RT_SUCCESS(pReqCopy->rc))
1692 {
1693 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1694 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1695
1696 memcpy(pReqHdr, pReqCopy, cbReq);
1697 if (pcbDataReturned)
1698 *pcbDataReturned = cbReq;
1699 }
1700 else if (RT_FAILURE(rc))
1701 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1702 else
1703 {
1704 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1705 rc = pReqCopy->rc;
1706 }
1707
1708 VbglGRFree(pReqCopy);
1709 return rc;
1710}
1711
1712
1713static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1714{
1715 VMMDevCtlGuestFilterMask *pReq;
1716 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1717 if (RT_FAILURE(rc))
1718 {
1719 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1720 sizeof(*pReq), sizeof(*pReq), rc));
1721 return rc;
1722 }
1723
1724 pReq->u32OrMask = pInfo->u32OrMask;
1725 pReq->u32NotMask = pInfo->u32NotMask;
1726 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1727 rc = VbglGRPerform(&pReq->header);
1728 if (RT_FAILURE(rc))
1729 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1730
1731 VbglGRFree(&pReq->header);
1732 return rc;
1733}
1734
1735#ifdef VBOX_WITH_HGCM
1736
1737AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1738
1739/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
1740static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
1741 bool fInterruptible, uint32_t cMillies)
1742{
1743 int rc;
1744
1745 /*
1746 * Check to see if the condition was met by the time we got here.
1747 *
1748 * We create a simple poll loop here for dealing with out-of-memory
1749 * conditions since the caller isn't necessarily able to deal with
1750 * us returning too early.
1751 */
1752 PVBOXGUESTWAIT pWait;
1753 for (;;)
1754 {
1755 RTSpinlockAcquire(pDevExt->EventSpinlock);
1756 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1757 {
1758 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1759 return VINF_SUCCESS;
1760 }
1761 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1762
1763 pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
1764 if (pWait)
1765 break;
1766 if (fInterruptible)
1767 return VERR_INTERRUPTED;
1768 RTThreadSleep(1);
1769 }
1770 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
1771 pWait->pHGCMReq = pHdr;
1772
1773 /*
1774 * Re-enter the spinlock and re-check for the condition.
1775 * If the condition is met, return.
1776 * Otherwise link us into the HGCM wait list and go to sleep.
1777 */
1778 RTSpinlockAcquire(pDevExt->EventSpinlock);
1779 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
1780 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1781 {
1782 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1783 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1784 return VINF_SUCCESS;
1785 }
1786 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1787
1788 if (fInterruptible)
1789 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
1790 else
1791 rc = RTSemEventMultiWait(pWait->Event, cMillies);
1792 if (rc == VERR_SEM_DESTROYED)
1793 return rc;
1794
1795 /*
1796 * Unlink, free and return.
1797 */
1798 if ( RT_FAILURE(rc)
1799 && rc != VERR_TIMEOUT
1800 && ( !fInterruptible
1801 || rc != VERR_INTERRUPTED))
1802 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
1803
1804 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1805 return rc;
1806}
1807
1808
1809/**
1810 * This is a callback for dealing with async waits.
1811 *
1812 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1813 */
1814static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1815{
1816 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1817 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1818 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1819 pDevExt,
1820 false /* fInterruptible */,
1821 u32User /* cMillies */);
1822}
1823
1824
1825/**
1826 * This is a callback for dealing with interruptible async waits (with a timeout).
1827 *
1828 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1829 */
1830static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1831 void *pvUser, uint32_t u32User)
1832{
1833 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1834 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1835 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1836 pDevExt,
1837 true /* fInterruptible */,
1838 u32User /* cMillies */ );
1839
1840}
1841
1842
1843static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1844 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
1845{
1846 int rc;
1847
1848 /*
1849 * The VbglHGCMConnect call will invoke the callback if the HGCM
1850 * call is performed in an ASYNC fashion. The function is not able
1851 * to deal with cancelled requests.
1852 */
1853 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
1854 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
1855 ? pInfo->Loc.u.host.achName : "<not local host>"));
1856
1857 rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1858 if (RT_SUCCESS(rc))
1859 {
1860 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
1861 pInfo->u32ClientID, pInfo->result, rc));
1862 if (RT_SUCCESS(pInfo->result))
1863 {
1864 /*
1865 * Append the client id to the client id table.
1866 * If the table has somehow become filled up, we'll disconnect the session.
1867 */
1868 unsigned i;
1869 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1870 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1871 if (!pSession->aHGCMClientIds[i])
1872 {
1873 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1874 break;
1875 }
1876 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1877 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1878 {
1879 static unsigned s_cErrors = 0;
1880 VBoxGuestHGCMDisconnectInfo Info;
1881
1882 if (s_cErrors++ < 32)
1883 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1884
1885 Info.result = 0;
1886 Info.u32ClientID = pInfo->u32ClientID;
1887 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1888 return VERR_TOO_MANY_OPEN_FILES;
1889 }
1890 }
1891 if (pcbDataReturned)
1892 *pcbDataReturned = sizeof(*pInfo);
1893 }
1894 return rc;
1895}
1896
1897
1898static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1899 size_t *pcbDataReturned)
1900{
1901 /*
1902 * Validate the client id and invalidate its entry while we're in the call.
1903 */
1904 int rc;
1905 const uint32_t u32ClientId = pInfo->u32ClientID;
1906 unsigned i;
1907 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1908 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1909 if (pSession->aHGCMClientIds[i] == u32ClientId)
1910 {
1911 pSession->aHGCMClientIds[i] = UINT32_MAX;
1912 break;
1913 }
1914 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1915 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1916 {
1917 static unsigned s_cErrors = 0;
1918 if (s_cErrors++ > 32)
1919 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1920 return VERR_INVALID_HANDLE;
1921 }
1922
1923 /*
1924 * The VbglR0HGCMInternalDisconnect call will invoke the callback if the HGCM
1925 * call is performed in an ASYNC fashion. The function is not able
1926 * to deal with cancelled requests.
1927 */
1928 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1929 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1930 if (RT_SUCCESS(rc))
1931 {
1932 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1933 if (pcbDataReturned)
1934 *pcbDataReturned = sizeof(*pInfo);
1935 }
1936
1937 /* Update the client id array according to the result. */
1938 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1939 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1940 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1941 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1942
1943 return rc;
1944}
1945
1946
1947static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1948 PVBOXGUESTSESSION pSession,
1949 VBoxGuestHGCMCallInfo *pInfo,
1950 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
1951 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1952{
1953 const uint32_t u32ClientId = pInfo->u32ClientID;
1954 uint32_t fFlags;
1955 size_t cbActual;
1956 unsigned i;
1957 int rc;
1958
1959 /*
1960 * Some more validations.
1961 */
1962 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1963 {
1964 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1965 return VERR_INVALID_PARAMETER;
1966 }
1967
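    /* Required buffer size: the (optionally prefixed) call header plus one
       parameter structure per parameter; on 64-bit builds 32-bit clients use
       the smaller HGCMFunctionParameter32 layout. */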
1968 cbActual = cbExtra + sizeof(*pInfo);
1969#ifdef RT_ARCH_AMD64
1970 if (f32bit)
1971 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1972 else
1973#endif
1974 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1975 if (cbData < cbActual)
1976 {
1977 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1978 cbData, cbData, cbActual, cbActual));
1979 return VERR_INVALID_PARAMETER;
1980 }
1981
1982 /*
1983 * Validate the client id.
1984 */
1985 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1986 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1987 if (pSession->aHGCMClientIds[i] == u32ClientId)
1988 break;
1989 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1990 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1991 {
1992 static unsigned s_cErrors = 0;
1993 if (s_cErrors++ > 32)
1994 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1995 return VERR_INVALID_HANDLE;
1996 }
1997
1998 /*
1999 * The VbglHGCMCall call will invoke the callback if the HGCM
2000 * call is performed in an ASYNC fashion. This function can
2001 * deal with cancelled requests, so we let user-mode requests
2002 * be interruptible (a dedicated flag for this could be added later).
2003 */
2004 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
2005 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
2006#ifdef RT_ARCH_AMD64
2007 if (f32bit)
2008 {
2009 if (fInterruptible)
2010 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2011 else
2012 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2013 }
2014 else
2015#endif
2016 {
2017 if (fInterruptible)
2018 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
2019 else
2020 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
2021 }
2022 if (RT_SUCCESS(rc))
2023 {
2024 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2025 if (pcbDataReturned)
2026 *pcbDataReturned = cbActual;
2027 }
2028 else
2029 {
2030 if ( rc != VERR_INTERRUPTED
2031 && rc != VERR_TIMEOUT)
2032 {
2033 static unsigned s_cErrors = 0;
2034 if (s_cErrors++ < 32)
2035 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2036 }
2037 else
2038 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2039 }
2040 return rc;
2041}
2042
2043
2044#endif /* VBOX_WITH_HGCM */
2045
2046/**
2047 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2048 *
2049 * Ask the host for the size of the balloon and try to set it accordingly. If
2050 * this approach fails because it's not supported, return with fHandleInR3 set
2051 * and let userland supply memory we can lock via the other ioctl.
2052 *
2053 * @returns VBox status code.
2054 *
2055 * @param pDevExt The device extension.
2056 * @param pSession The session.
2057 * @param pInfo The output buffer.
2058 * @param pcbDataReturned Where to store the amount of returned data. Can
2059 * be NULL.
2060 */
2061static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2062 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2063{
2064 VMMDevGetMemBalloonChangeRequest *pReq;
2065 int rc;
2066
2067 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2068 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2069 AssertRCReturn(rc, rc);
2070
2071 /*
2072 * The first user trying to query/change the balloon becomes the
2073 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2074 */
2075 if ( pDevExt->MemBalloon.pOwner != pSession
2076 && pDevExt->MemBalloon.pOwner == NULL)
2077 pDevExt->MemBalloon.pOwner = pSession;
2078
2079 if (pDevExt->MemBalloon.pOwner == pSession)
2080 {
2081 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2082 if (RT_SUCCESS(rc))
2083 {
2084 /*
2085 * This is a response to the VMMDEV_EVENT_BALLOON_CHANGE_REQUEST event.
2086 * Setting eventAck below means that we query the balloon size from the
2087 * host and change the guest memory balloon according to that value.
2088 */
2089 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2090 rc = VbglGRPerform(&pReq->header);
2091 if (RT_SUCCESS(rc))
2092 {
2093 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2094 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2095
2096 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2097 pInfo->fHandleInR3 = false;
2098
2099 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2100 /* Ignore various out of memory failures. */
2101 if ( rc == VERR_NO_MEMORY
2102 || rc == VERR_NO_PHYS_MEMORY
2103 || rc == VERR_NO_CONT_MEMORY)
2104 rc = VINF_SUCCESS;
2105
2106 if (pcbDataReturned)
2107 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2108 }
2109 else
2110 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2111 VbglGRFree(&pReq->header);
2112 }
2113 }
2114 else
2115 rc = VERR_PERMISSION_DENIED;
2116
2117 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2118 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2119 return rc;
2120}
2121
2122
2123/**
2124 * Handle a request for changing the memory balloon.
2125 *
2126 * @returns VBox status code.
2127 *
2128 * @param pDevExt The device extension.
2129 * @param pSession The session.
2130 * @param pInfo The change request structure (input).
2131 * @param pcbDataReturned Where to store the amount of returned data. Can
2132 * be NULL.
2133 */
2134static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2135 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2136{
2137 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2138 AssertRCReturn(rc, rc);
2139
2140 if (!pDevExt->MemBalloon.fUseKernelAPI)
2141 {
2142 /*
2143 * The first user trying to query/change the balloon becomes the
2144 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2145 */
2146 if ( pDevExt->MemBalloon.pOwner != pSession
2147 && pDevExt->MemBalloon.pOwner == NULL)
2148 pDevExt->MemBalloon.pOwner = pSession;
2149
2150 if (pDevExt->MemBalloon.pOwner == pSession)
2151 {
2152 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2153 if (pcbDataReturned)
2154 *pcbDataReturned = 0;
2155 }
2156 else
2157 rc = VERR_PERMISSION_DENIED;
2158 }
2159 else
2160 rc = VERR_PERMISSION_DENIED;
2161
2162 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2163 return rc;
2164}
2165
2166
2167/**
2168 * Handle a request for writing a core dump of the guest on the host.
2169 *
2170 * @returns VBox status code.
2171 *
2172 * @param pDevExt The device extension.
2173 * @param pInfo The output buffer.
2174 */
2175static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2176{
2177 VMMDevReqWriteCoreDump *pReq = NULL;
2178 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2179 if (RT_FAILURE(rc))
2180 {
2181 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2182 sizeof(*pReq), sizeof(*pReq), rc));
2183 return rc;
2184 }
2185
2186 pReq->fFlags = pInfo->fFlags;
2187 rc = VbglGRPerform(&pReq->header);
2188 if (RT_FAILURE(rc))
2189 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2190
2191 VbglGRFree(&pReq->header);
2192 return rc;
2193}
2194
2195
2196#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2197/**
2198 * Enables the VRDP session and saves its session ID.
2199 *
2200 * @returns VBox status code.
2201 *
2202 * @param pDevExt The device extension.
2203 * @param pSession The session.
2204 */
2205 static int VBoxGuestCommonIOCtl_EnableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2206{
2207 /* Nothing to do here right now, since this is only supported on Windows at the moment. */
2208 return VERR_NOT_IMPLEMENTED;
2209}
2210
2211
2212/**
2213 * Disables the VRDP session.
2214 *
2215 * @returns VBox status code.
2216 *
2217 * @param pDevExt The device extension.
2218 * @param pSession The session.
2219 */
2220 static int VBoxGuestCommonIOCtl_DisableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2221{
2222 /* Nothing to do here right now, since this is only supported on Windows at the moment. */
2223 return VERR_NOT_IMPLEMENTED;
2224}
2225#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2226
2227#ifdef DEBUG
2228/** Unit test SetMouseStatus instead of really executing the request. */
2229static bool g_test_fSetMouseStatus = false;
2230/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
2231static int g_test_SetMouseStatusGRRC;
2232/** When unit testing SetMouseStatus this will be set to the status passed to
2233 * the GR. */
2234static uint32_t g_test_statusSetMouseStatus;
2235#endif
2236
2237static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
2238{
2239 VMMDevReqMouseStatus *pReq;
2240 int rc;
2241
2242 LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
2243 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2244 if (RT_SUCCESS(rc))
2245 {
2246 pReq->mouseFeatures = fFeatures;
2247 pReq->pointerXPos = 0;
2248 pReq->pointerYPos = 0;
2249#ifdef DEBUG
2250 if (g_test_fSetMouseStatus)
2251 {
2252 g_test_statusSetMouseStatus = pReq->mouseFeatures;
2253 rc = g_test_SetMouseStatusGRRC;
2254 }
2255 else
2256#endif
2257 rc = VbglGRPerform(&pReq->header);
2258 VbglGRFree(&pReq->header);
2259 }
2260 LogRelFlowFunc(("rc=%Rrc\n", rc));
2261 return rc;
2262}
2263
2264
2265/**
2266 * Sets the mouse status features for this session and updates them
2267 * globally. We aim to ensure that if several threads call this in
2268 * parallel the most recent status will always end up being set.
2269 *
2270 * @returns VBox status code.
2271 *
2272 * @param pDevExt The device extension.
2273 * @param pSession The session.
2274 * @param fFeatures New bitmap of enabled features.
2275 */
2276static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2277{
2278 uint32_t fNewDevExtStatus = 0;
2279 unsigned i;
2280 int rc;
2281 /* Exit early if nothing has changed - hack to work around the
2282 * Windows Additions not using the common code. */
2283 bool fNoAction;
2284
2285 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2286
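    /* Update the per-feature usage counters: each session is counted once for
       every feature bit it has enabled, and a bit is included in the new
       global status as long as at least one session still wants it. */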
2287 for (i = 0; i < sizeof(fFeatures) * 8; i++)
2288 {
2289 if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
2290 {
2291 if ( (RT_BIT_32(i) & fFeatures)
2292 && !(RT_BIT_32(i) & pSession->fMouseStatus))
2293 pDevExt->acMouseFeatureUsage[i]++;
2294 else if ( !(RT_BIT_32(i) & fFeatures)
2295 && (RT_BIT_32(i) & pSession->fMouseStatus))
2296 pDevExt->acMouseFeatureUsage[i]--;
2297 }
2298 if (pDevExt->acMouseFeatureUsage[i] > 0)
2299 fNewDevExtStatus |= RT_BIT_32(i);
2300 }
2301
2302 pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
2303 fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
2304 pDevExt->fMouseStatus = fNewDevExtStatus;
2305
2306 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2307 if (fNoAction)
2308 return VINF_SUCCESS;
2309
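    /* Push the aggregated status to the host.  If another thread updates
       pDevExt->fMouseStatus while our request is in flight, loop and resend so
       that the most recently set status wins. */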
2310 do
2311 {
2312 fNewDevExtStatus = pDevExt->fMouseStatus;
2313 rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
2314 } while ( RT_SUCCESS(rc)
2315 && fNewDevExtStatus != pDevExt->fMouseStatus);
2316
2317 return rc;
2318}
2319
2320
2321#ifdef DEBUG
2322/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2323 * the code in question it probably makes most sense to keep it next to the
2324 * code. */
2325static void testSetMouseStatus(void)
2326{
2327 uint32_t u32Data;
2328 int rc;
2329 RTSPINLOCK Spinlock;
2330
2331 g_test_fSetMouseStatus = true;
2332 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2333 AssertRCReturnVoid(rc);
2334 {
2335 VBOXGUESTDEVEXT DevExt = { 0 };
2336 VBOXGUESTSESSION Session = { 0 };
2337
2338 g_test_statusSetMouseStatus = ~0;
2339 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2340 DevExt.SessionSpinlock = Spinlock;
2341 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2342 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2343 &Session, &u32Data, sizeof(u32Data), NULL);
2344 AssertRCSuccess(rc);
2345 AssertMsg( g_test_statusSetMouseStatus
2346 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2347 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2348 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2349 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2350 &Session, &u32Data, sizeof(u32Data), NULL);
2351 AssertRCSuccess(rc);
2352 AssertMsg( g_test_statusSetMouseStatus
2353 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2354 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2355 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2356 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2357 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2358 &Session, &u32Data, sizeof(u32Data), NULL);
2359 AssertRCSuccess(rc);
2360 AssertMsg( g_test_statusSetMouseStatus
2361 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2362 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2363 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2364 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2365 &Session, &u32Data, sizeof(u32Data), NULL);
2366 AssertRCSuccess(rc);
2367 AssertMsg( g_test_statusSetMouseStatus
2368 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2369 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2370 u32Data = 0;
2371 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2372 &Session, &u32Data, sizeof(u32Data), NULL);
2373 AssertRCSuccess(rc);
2374 AssertMsg( g_test_statusSetMouseStatus
2375 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2376 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2377 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2378 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2379 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2380 /* This should succeed as the host request should not be made
2381 * since nothing has changed. */
2382 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2383 &Session, &u32Data, sizeof(u32Data), NULL);
2384 AssertRCSuccess(rc);
2385 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2386 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2387 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2388 &Session, &u32Data, sizeof(u32Data), NULL);
2389 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2390 /* Untested paths: out of memory; race setting status to host */
2391 }
2392 RTSpinlockDestroy(Spinlock);
2393 g_test_fSetMouseStatus = false;
2394}
2395#endif
2396
2397
2398/**
2399 * Guest backdoor logging.
2400 *
2401 * @returns VBox status code.
2402 *
2403 * @param pDevExt The device extension.
2404 * @param pch The log message (need not be NULL terminated).
2405 * @param cbData Size of the buffer.
2406 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2407 */
2408static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2409{
2410 NOREF(pch);
2411 NOREF(cbData);
2412 if (pDevExt->fLoggingEnabled)
2413 RTLogBackdoorPrintf("%.*s", cbData, pch);
2414 else
2415 Log(("%.*s", cbData, pch));
2416 if (pcbDataReturned)
2417 *pcbDataReturned = 0;
2418 return VINF_SUCCESS;
2419}
2420
2421
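/*
 * Illustrative sketch only (not part of this file): the OS-specific ioctl
 * entry points copy the user buffer in and out and then simply forward to
 * the common dispatcher below, along the lines of (local names are
 * assumptions):
 *
 *     size_t cbReturned = 0;
 *     rc = VBoxGuestCommonIOCtl(iFunction, &g_DevExt, pSession,
 *                               pvBuf, cbBuf, &cbReturned);
 */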
2422/**
2423 * Common IOCtl for user to kernel and kernel to kernel communication.
2424 *
2425 * This function only does the basic validation and then invokes
2426 * worker functions that take care of each specific function.
2427 *
2428 * @returns VBox status code.
2429 *
2430 * @param iFunction The requested function.
2431 * @param pDevExt The device extension.
2432 * @param pSession The client session.
2433 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2434 * @param cbData The max size of the data buffer.
2435 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2436 */
2437int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2438 void *pvData, size_t cbData, size_t *pcbDataReturned)
2439{
2440 int rc;
2441 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2442 iFunction, pDevExt, pSession, pvData, cbData));
2443
2444 /*
2445 * Make sure the returned data size is set to zero.
2446 */
2447 if (pcbDataReturned)
2448 *pcbDataReturned = 0;
2449
2450 /*
2451 * Define some helper macros to simplify validation.
2452 */
2453#define CHECKRET_RING0(mnemonic) \
2454 do { \
2455 if (pSession->R0Process != NIL_RTR0PROCESS) \
2456 { \
2457 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2458 pSession->Process, (uintptr_t)pSession->R0Process)); \
2459 return VERR_PERMISSION_DENIED; \
2460 } \
2461 } while (0)
2462#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2463 do { \
2464 if (cbData < (cbMin)) \
2465 { \
2466 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2467 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2468 return VERR_BUFFER_OVERFLOW; \
2469 } \
2470 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2471 { \
2472 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2473 return VERR_INVALID_POINTER; \
2474 } \
2475 } while (0)
2476#define CHECKRET_SIZE(mnemonic, cb) \
2477 do { \
2478 if (cbData != (cb)) \
2479 { \
2480 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2481 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2482 return VERR_BUFFER_OVERFLOW; \
2483 } \
2484 if ((cb) != 0 && !VALID_PTR(pvData)) \
2485 { \
2486 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2487 return VERR_INVALID_POINTER; \
2488 } \
2489 } while (0)
2490
2491
2492 /*
2493 * Deal with variably sized requests first.
2494 */
2495 rc = VINF_SUCCESS;
2496 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2497 {
2498 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2499 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2500 }
2501#ifdef VBOX_WITH_DPC_LATENCY_CHECKER
2502 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_DPC))
2503 {
2504 rc = VBoxGuestCommonIOCtl_DPC(pDevExt, pSession, pvData, cbData, pcbDataReturned);
2505 }
2506#endif /* VBOX_WITH_DPC_LATENCY_CHECKER */
2507#ifdef VBOX_WITH_HGCM
2508 /*
2509 * These ones are a bit tricky.
2510 */
2511 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2512 {
2513 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2514 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2515 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2516 fInterruptible, false /*f32bit*/, false /* fUserData */,
2517 0, cbData, pcbDataReturned);
2518 }
2519 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2520 {
2521 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2522 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2523 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2524 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2525 false /*f32bit*/, false /* fUserData */,
2526 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2527 }
2528 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2529 {
2530 bool fInterruptible = true;
2531 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2532 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2533 fInterruptible, false /*f32bit*/, true /* fUserData */,
2534 0, cbData, pcbDataReturned);
2535 }
2536# ifdef RT_ARCH_AMD64
2537 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2538 {
2539 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2540 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2541 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2542 fInterruptible, true /*f32bit*/, false /* fUserData */,
2543 0, cbData, pcbDataReturned);
2544 }
2545 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2546 {
2547 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2548 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2549 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2550 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2551 true /*f32bit*/, false /* fUserData */,
2552 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2553 }
2554# endif
2555#endif /* VBOX_WITH_HGCM */
2556 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2557 {
2558 CHECKRET_MIN_SIZE("LOG", 1);
2559 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2560 }
2561 else
2562 {
2563 switch (iFunction)
2564 {
2565 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2566 CHECKRET_RING0("GETVMMDEVPORT");
2567 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2568 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2569 break;
2570
2571#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2572 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2573 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2574 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2575 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2576 break;
2577#endif
2578
2579 case VBOXGUEST_IOCTL_WAITEVENT:
2580 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2581 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2582 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2583 break;
2584
2585 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2586 if (cbData != 0)
2587 rc = VERR_INVALID_PARAMETER;
2588 else rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2589 break;
2590
2591 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2592 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2593 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2594 break;
2595
2596#ifdef VBOX_WITH_HGCM
2597 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2598# ifdef RT_ARCH_AMD64
2599 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2600# endif
2601 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2602 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2603 break;
2604
2605 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2606# ifdef RT_ARCH_AMD64
2607 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2608# endif
2609 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2610 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2611 break;
2612#endif /* VBOX_WITH_HGCM */
2613
2614 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2615 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2616 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2617 break;
2618
2619 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2620 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2621 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2622 break;
2623
2624 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2625 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2626 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2627 break;
2628
2629#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2630 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2631 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2632 break;
2633
2634 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2635 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2636 break;
2637#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2638 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2639 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2640 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2641 *(uint32_t *)pvData);
2642 break;
2643
2644 default:
2645 {
2646 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2647 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2648 rc = VERR_NOT_SUPPORTED;
2649 break;
2650 }
2651 }
2652 }
2653
2654 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2655 return rc;
2656}
2657
2658
2659
2660/**
2661 * Common interrupt service routine.
2662 *
2663 * This deals with events and with waking up thread waiting for those events.
2664 *
2665 * @returns true if it was our interrupt, false if it wasn't.
2666 * @param pDevExt The VBoxGuest device extension.
2667 */
2668bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2669{
2670#ifndef RT_OS_WINDOWS
2671 VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
2672#endif
2673 bool fMousePositionChanged = false;
2674 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2675 int rc = 0;
2676 bool fOurIrq;
2677
2678 /*
2679 * Make sure we've initialized the device extension.
2680 */
2681 if (RT_UNLIKELY(!pReq))
2682 return false;
2683
2684 /*
2685 * Enter the spinlock, increase the ISR count and check if it's our IRQ or
2686 * not.
2687 */
2688 RTSpinlockAcquire(pDevExt->EventSpinlock);
2689 ASMAtomicIncU32(&pDevExt->cISR);
2690 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2691 if (fOurIrq)
2692 {
2693 /*
2694 * Acknowledge events.
2695 * We don't use VbglGRPerform here as it may take other spinlocks.
2696 */
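    /* pIrqAckEvents is a VMMDevEvents request allocated once at init time;
       writing its physical address to the VMMDev request port makes the host
       return the pending event mask and lower the interrupt line. */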
2697 pReq->header.rc = VERR_INTERNAL_ERROR;
2698 pReq->events = 0;
2699 ASMCompilerBarrier();
2700 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2701 ASMCompilerBarrier(); /* paranoia */
2702 if (RT_SUCCESS(pReq->header.rc))
2703 {
2704 uint32_t fEvents = pReq->events;
2705 PVBOXGUESTWAIT pWait;
2706 PVBOXGUESTWAIT pSafe;
2707
2708 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2709
2710 /*
2711 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2712 */
2713 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2714 {
2715#ifndef RT_OS_WINDOWS
2716 MouseNotifyCallback = pDevExt->MouseNotifyCallback;
2717#endif
2718 fMousePositionChanged = true;
2719 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2720 }
2721
2722#ifdef VBOX_WITH_HGCM
2723 /*
2724 * The HGCM event/list is kind of different in that we evaluate all entries.
2725 */
2726 if (fEvents & VMMDEV_EVENT_HGCM)
2727 {
2728 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2729 {
2730 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2731 {
2732 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2733 RTListNodeRemove(&pWait->ListNode);
2734# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2735 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2736# else
2737 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2738 rc |= RTSemEventMultiSignal(pWait->Event);
2739# endif
2740 }
2741 }
2742 fEvents &= ~VMMDEV_EVENT_HGCM;
2743 }
2744#endif
2745
2746 /*
2747 * Normal FIFO waiter evaluation.
2748 */
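    /* Merge in previously pending events; whatever no waiter consumes in the
       loop below is written back to f32PendingEvents afterwards. */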
2749 fEvents |= pDevExt->f32PendingEvents;
2750 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2751 {
2752 if ( (pWait->fReqEvents & fEvents)
2753 && !pWait->fResEvents)
2754 {
2755 pWait->fResEvents = pWait->fReqEvents & fEvents;
2756 fEvents &= ~pWait->fResEvents;
2757 RTListNodeRemove(&pWait->ListNode);
2758#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2759 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2760#else
2761 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2762 rc |= RTSemEventMultiSignal(pWait->Event);
2763#endif
2764 if (!fEvents)
2765 break;
2766 }
2767 }
2768 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2769 }
2770 else /* something is seriously wrong... */
2771 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2772 pReq->header.rc, pReq->events));
2773 }
2774 else
2775 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2776
2777 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2778
2779#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2780 /*
2781 * Do wake-ups.
2782 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2783 * care of it.
2784 */
2785 VBoxGuestWaitDoWakeUps(pDevExt);
2786#endif
2787
2788 /*
2789 * Work the poll and async notification queues on OSes that implement them.
2790 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2791 */
2792 if (fMousePositionChanged)
2793 {
2794 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2795 VBoxGuestNativeISRMousePollEvent(pDevExt);
2796#ifndef RT_OS_WINDOWS
2797 if (MouseNotifyCallback.pfnNotify)
2798 MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
2799#endif
2800 }
2801
2802 ASMAtomicDecU32(&pDevExt->cISR);
2803 Assert(rc == 0);
2804 return fOurIrq;
2805}
2806