VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 41683

Last change on this file since 41683 was 41649, checked in by vboxsync, 13 years ago

Additions/VBoxGuest: Windows build fix

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 98.4 KB
1/* $Id: VBoxGuest.cpp 41649 2012-06-11 12:53:56Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43#ifdef RT_OS_WINDOWS
44# ifndef CTL_CODE
45# include <Windows.h>
46# endif
47#endif
48#if defined(RT_OS_SOLARIS)
49# include <iprt/rand.h>
50#endif
51
52
53/*******************************************************************************
54* Internal Functions *
55*******************************************************************************/
56#ifdef VBOX_WITH_HGCM
57static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
58#endif
59#ifdef DEBUG
60static void testSetMouseStatus(void);
61#endif
62static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
63
64
65/*******************************************************************************
66* Global Variables *
67*******************************************************************************/
68static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
69
70#if defined(RT_OS_SOLARIS)
71/**
72 * Drag in the rest of IPRT since we share it with the
73 * rest of the kernel modules on Solaris.
74 */
75PFNRT g_apfnVBoxGuestIPRTDeps[] =
76{
77 /* VirtioNet */
78 (PFNRT)RTRandBytes,
79 NULL
80};
81#endif /* RT_OS_SOLARIS */
82
83
84/**
85 * Reserves memory in which the VMM can relocate any guest mappings
86 * that are floating around.
87 *
88 * This operation is a little bit tricky since the VMM might not accept
89 * just any address because of address clashes between the three contexts
90 * it operates in, so use a small stack to perform this operation.
91 *
92 * @returns VBox status code (ignored).
93 * @param pDevExt The device extension.
94 */
95static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
96{
97 /*
98 * Query the required space.
99 */
100 VMMDevReqHypervisorInfo *pReq;
101 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
102 if (RT_FAILURE(rc))
103 return rc;
104 pReq->hypervisorStart = 0;
105 pReq->hypervisorSize = 0;
106 rc = VbglGRPerform(&pReq->header);
107 if (RT_FAILURE(rc)) /* this shouldn't happen! */
108 {
109 VbglGRFree(&pReq->header);
110 return rc;
111 }
112
113 /*
114 * The VMM will report back if there is nothing it wants to map, like for
115 * instance in VT-x and AMD-V mode.
116 */
117 if (pReq->hypervisorSize == 0)
118 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
119 else
120 {
121 /*
122 * We have to try several times since the host can be picky
123 * about certain addresses.
124 */
125 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
126 uint32_t cbHypervisor = pReq->hypervisorSize;
127 RTR0MEMOBJ ahTries[5];
128 uint32_t iTry;
129 bool fBitched = false;
130 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
131 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
132 {
133 /*
134 * Reserve space, or if that isn't supported, create an object for
135 * some fictive physical memory and map that into kernel space.
136 *
137 * To make the code a bit uglier, most systems cannot help with
138 * 4MB alignment, so we have to deal with that in addition to
139 * having two ways of getting the memory.
140 */
141 uint32_t uAlignment = _4M;
142 RTR0MEMOBJ hObj;
143 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
144 if (rc == VERR_NOT_SUPPORTED)
145 {
146 uAlignment = PAGE_SIZE;
147 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
148 }
149 /*
150 * If both RTR0MemObjReserveKernel calls above failed because the functionality is either
151 * not supported or not implemented at all on the current platform, try to map the memory
152 * object into the virtual kernel space.
153 */
154 if (rc == VERR_NOT_SUPPORTED)
155 {
156 if (hFictive == NIL_RTR0MEMOBJ)
157 {
158 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
159 if (RT_FAILURE(rc))
160 break;
161 hFictive = hObj;
162 }
163 uAlignment = _4M;
164 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
165 if (rc == VERR_NOT_SUPPORTED)
166 {
167 uAlignment = PAGE_SIZE;
168 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
169 }
170 }
171 if (RT_FAILURE(rc))
172 {
173 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
174 rc, cbHypervisor, uAlignment, iTry));
175 fBitched = true;
176 break;
177 }
178
179 /*
180 * Try to set it.
181 */
182 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
183 pReq->header.rc = VERR_INTERNAL_ERROR;
184 pReq->hypervisorSize = cbHypervisor;
185 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
186 if ( uAlignment == PAGE_SIZE
187 && pReq->hypervisorStart & (_4M - 1))
188 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
189 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
190
191 rc = VbglGRPerform(&pReq->header);
192 if (RT_SUCCESS(rc))
193 {
194 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
195 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
196 RTR0MemObjAddress(pDevExt->hGuestMappings),
197 RTR0MemObjSize(pDevExt->hGuestMappings),
198 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
199 break;
200 }
201 ahTries[iTry] = hObj;
202 }
203
204 /*
205 * Cleanup failed attempts.
206 */
207 while (iTry-- > 0)
208 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
209 if ( RT_FAILURE(rc)
210 && hFictive != NIL_RTR0PTR)
211 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
212 if (RT_FAILURE(rc) && !fBitched)
213 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
214 }
215 VbglGRFree(&pReq->header);
216
217 /*
218 * We ignore failed attempts for now.
219 */
220 return VINF_SUCCESS;
221}
222
223
224/**
225 * Undo what vboxGuestInitFixateGuestMappings did.
226 *
227 * @param pDevExt The device extension.
228 */
229static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
230{
231 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
232 {
233 /*
234 * Tell the host that we're going to free the memory we reserved for
235 * it, then free it up. (Leak the memory if anything goes wrong here.)
236 */
237 VMMDevReqHypervisorInfo *pReq;
238 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
239 if (RT_SUCCESS(rc))
240 {
241 pReq->hypervisorStart = 0;
242 pReq->hypervisorSize = 0;
243 rc = VbglGRPerform(&pReq->header);
244 VbglGRFree(&pReq->header);
245 }
246 if (RT_SUCCESS(rc))
247 {
248 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
249 AssertRC(rc);
250 }
251 else
252 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
253
254 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
255 }
256}
257
258
259/**
260 * Sets the interrupt filter mask during initialization and termination.
261 *
262 * This will ASSUME that we're the ones in charge of the mask, so
263 * we'll simply clear all bits we don't set.
264 *
265 * @returns VBox status code (ignored).
266 * @param pDevExt The device extension.
267 * @param fMask The new mask.
268 */
269static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
270{
271 VMMDevCtlGuestFilterMask *pReq;
272 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
273 if (RT_SUCCESS(rc))
274 {
275 pReq->u32OrMask = fMask;
276 pReq->u32NotMask = ~fMask;
277 rc = VbglGRPerform(&pReq->header);
278 if (RT_FAILURE(rc))
279 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
280 VbglGRFree(&pReq->header);
281 }
282 return rc;
283}
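/*
 * Illustrative sketch, not part of the original file and not built: how the
 * or/not mask pair of VMMDevReq_CtlGuestFilterMask is typically used to turn
 * on one extra event without clearing the fixed events. The event bit and the
 * helper name are assumptions chosen for the example.
 */
#if 0
static int vboxGuestExampleEnableMouseEvents(PVBOXGUESTDEVEXT pDevExt)
{
    VMMDevCtlGuestFilterMask *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
    if (RT_SUCCESS(rc))
    {
        pReq->u32OrMask  = VMMDEV_EVENT_MOUSE_POSITION_CHANGED; /* bits to set */
        pReq->u32NotMask = 0;                                    /* bits to clear; must never include pDevExt->fFixedEvents */
        rc = VbglGRPerform(&pReq->header);
        VbglGRFree(&pReq->header);
    }
    return rc;
}
#endif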
284
285
286/**
287 * Inflate the balloon by one chunk represented by an R0 memory object.
288 *
289 * The caller owns the balloon mutex.
290 *
291 * @returns IPRT status code.
292 * @param pMemObj Pointer to the R0 memory object.
293 * @param pReq The pre-allocated request for performing the VMMDev call.
294 */
295static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
296{
297 uint32_t iPage;
298 int rc;
299
300 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
301 {
302 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
303 pReq->aPhysPage[iPage] = phys;
304 }
305
306 pReq->fInflate = true;
307 pReq->header.size = cbChangeMemBalloonReq;
308 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
309
310 rc = VbglGRPerform(&pReq->header);
311 if (RT_FAILURE(rc))
312 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
313 return rc;
314}
315
316
317/**
318 * Deflate the balloon by one chunk - inform the host and free the memory object.
319 *
320 * The caller owns the balloon mutex.
321 *
322 * @returns IPRT status code.
323 * @param pMemObj Pointer to the R0 memory object.
324 * The memory object will be freed afterwards.
325 * @param pReq The pre-allocated request for performing the VMMDev call.
326 */
327static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
328{
329 uint32_t iPage;
330 int rc;
331
332 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
333 {
334 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
335 pReq->aPhysPage[iPage] = phys;
336 }
337
338 pReq->fInflate = false;
339 pReq->header.size = cbChangeMemBalloonReq;
340 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
341
342 rc = VbglGRPerform(&pReq->header);
343 if (RT_FAILURE(rc))
344 {
345 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
346 return rc;
347 }
348
349 rc = RTR0MemObjFree(*pMemObj, true);
350 if (RT_FAILURE(rc))
351 {
352 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
353 return rc;
354 }
355
356 *pMemObj = NIL_RTR0MEMOBJ;
357 return VINF_SUCCESS;
358}
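/*
 * Illustrative sketch, not built: the VbglGR request life cycle that the two
 * balloon workers above (and most helpers in this file) follow -- allocate the
 * request on the physical heap, fill it in, perform it, read the results and
 * free it. The request type and helper name are merely examples.
 */
#if 0
static int vboxGuestExampleQueryHypervisorSize(uint32_t *pcbHypervisor)
{
    VMMDevReqHypervisorInfo *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_GetHypervisorInfo);
    if (RT_SUCCESS(rc))
    {
        pReq->hypervisorStart = 0;
        pReq->hypervisorSize  = 0;
        rc = VbglGRPerform(&pReq->header);    /* issues the request to the VMMDev */
        if (RT_SUCCESS(rc))
            *pcbHypervisor = pReq->hypervisorSize;
        VbglGRFree(&pReq->header);
    }
    return rc;
}
#endif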
359
360
361/**
362 * Inflate/deflate the memory balloon and notify the host.
363 *
364 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
365 * the mutex.
366 *
367 * @returns VBox status code.
368 * @param pDevExt The device extension.
370 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
371 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
372 * (VINF_SUCCESS if set).
373 */
374static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
375{
376 int rc = VINF_SUCCESS;
377
378 if (pDevExt->MemBalloon.fUseKernelAPI)
379 {
380 VMMDevChangeMemBalloon *pReq;
381 uint32_t i;
382
383 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
384 {
385 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
386 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
387 return VERR_INVALID_PARAMETER;
388 }
389
390 if (cBalloonChunks == pDevExt->MemBalloon.cChunks)
391 return VINF_SUCCESS; /* nothing to do */
392
393 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
394 && !pDevExt->MemBalloon.paMemObj)
395 {
396 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
397 if (!pDevExt->MemBalloon.paMemObj)
398 {
399 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
400 return VERR_NO_MEMORY;
401 }
402 }
403
404 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
405 if (RT_FAILURE(rc))
406 return rc;
407
408 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
409 {
410 /* inflate */
411 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
412 {
413 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
414 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
415 if (RT_FAILURE(rc))
416 {
417 if (rc == VERR_NOT_SUPPORTED)
418 {
419 /* not supported -- fall back to the R3-allocated memory. */
420 rc = VINF_SUCCESS;
421 pDevExt->MemBalloon.fUseKernelAPI = false;
422 Assert(pDevExt->MemBalloon.cChunks == 0);
423 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
424 }
425 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
426 * cannot allocate more memory => don't try further, just stop here */
427 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
428 break;
429 }
430
431 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
432 if (RT_FAILURE(rc))
433 {
434 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
435 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
436 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
437 break;
438 }
439 pDevExt->MemBalloon.cChunks++;
440 }
441 }
442 else
443 {
444 /* deflate */
445 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
446 {
447 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
448 if (RT_FAILURE(rc))
449 {
450 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
451 break;
452 }
453 pDevExt->MemBalloon.cChunks--;
454 }
455 }
456
457 VbglGRFree(&pReq->header);
458 }
459
460 /*
461 * Set the handle-in-ring3 indicator. When set, ring-3 will have to perform
462 * the balloon changes via the other API.
463 */
464 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
465
466 return rc;
467}
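/*
 * Illustrative sketch, not built: how a caller of vboxGuestSetBalloonSizeKernel
 * might use the handle-in-ring3 indicator. The surrounding variables are
 * assumptions for the example.
 */
#if 0
    uint32_t fHandleInR3 = false;
    int rc = vboxGuestSetBalloonSizeKernel(pDevExt, cChunksRequested, &fHandleInR3);
    if (RT_SUCCESS(rc) && fHandleInR3)
    {
        /* Kernel allocations are not possible here; ring-3 has to lock and hand
           over the chunks itself, one at a time, via the change-balloon I/O
           control which ends up in vboxGuestSetBalloonSizeFromUser() below. */
    }
#endif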
468
469
470/**
471 * Helper to reinit the VBoxVMM communication after hibernation.
472 *
473 * @returns VBox status code.
474 * @param pDevExt The device extension.
475 * @param enmOSType The OS type.
476 */
477int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
478{
479 int rc = VBoxGuestReportGuestInfo(enmOSType);
480 if (RT_SUCCESS(rc))
481 {
482 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
483 if (RT_FAILURE(rc))
484 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
485 }
486 else
487 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
488 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
489 return rc;
490}
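/*
 * Illustrative sketch, not built: a native power-management resume path would
 * re-establish the host connection roughly like this once the device is powered
 * up again. The OS type value and the global device extension name are
 * assumptions for the example.
 */
#if 0
    int rc = VBoxGuestReinitDevExtAfterHibernation(&g_DevExt, VBOXOSTYPE_Linux26);
    if (RT_FAILURE(rc))
        LogRel(("resume: reinitializing the guest additions failed, rc=%Rrc\n", rc));
#endif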
491
492
493/**
494 * Inflate/deflate the balloon by one chunk.
495 *
496 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
497 *
498 * @returns VBox status code.
499 * @param pDevExt The device extension.
500 * @param pSession The session.
501 * @param u64ChunkAddr The address of the chunk to add to / remove from the
502 * balloon.
503 * @param fInflate Inflate if true, deflate if false.
504 */
505static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
506 uint64_t u64ChunkAddr, bool fInflate)
507{
508 VMMDevChangeMemBalloon *pReq;
509 int rc = VINF_SUCCESS;
510 uint32_t i;
511 PRTR0MEMOBJ pMemObj = NULL;
512
513 if (fInflate)
514 {
515 if ( pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
516 || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
517 {
518 LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
519 pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
520 return VERR_INVALID_PARAMETER;
521 }
522
523 if (!pDevExt->MemBalloon.paMemObj)
524 {
525 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
526 if (!pDevExt->MemBalloon.paMemObj)
527 {
528 LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
529 return VERR_NO_MEMORY;
530 }
531 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
532 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
533 }
534 }
535 else
536 {
537 if (pDevExt->MemBalloon.cChunks == 0)
538 {
539 AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
540 return VERR_INVALID_PARAMETER;
541 }
542 }
543
544 /*
545 * Enumerate all memory objects and check if the object is already registered.
546 */
547 for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
548 {
549 if ( fInflate
550 && !pMemObj
551 && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
552 pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
553 if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
554 {
555 if (fInflate)
556 return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
557 pMemObj = &pDevExt->MemBalloon.paMemObj[i];
558 break;
559 }
560 }
561 if (!pMemObj)
562 {
563 if (fInflate)
564 {
565 /* no free object pointer found -- should not happen */
566 return VERR_NO_MEMORY;
567 }
568
569 /* cannot free this memory as it wasn't provided before */
570 return VERR_NOT_FOUND;
571 }
572
573 /*
574 * Try to inflate / deflate the balloon as requested.
575 */
576 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
577 if (RT_FAILURE(rc))
578 return rc;
579
580 if (fInflate)
581 {
582 rc = RTR0MemObjLockUser(pMemObj, (RTR3PTR)u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
583 RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
584 if (RT_SUCCESS(rc))
585 {
586 rc = vboxGuestBalloonInflate(pMemObj, pReq);
587 if (RT_SUCCESS(rc))
588 pDevExt->MemBalloon.cChunks++;
589 else
590 {
591 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
592 RTR0MemObjFree(*pMemObj, true);
593 *pMemObj = NIL_RTR0MEMOBJ;
594 }
595 }
596 }
597 else
598 {
599 rc = vboxGuestBalloonDeflate(pMemObj, pReq);
600 if (RT_SUCCESS(rc))
601 pDevExt->MemBalloon.cChunks--;
602 else
603 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
604 }
605
606 VbglGRFree(&pReq->header);
607 return rc;
608}
609
610
611/**
612 * Cleanup the memory balloon of a session.
613 *
614 * Will request the balloon mutex, so it must be valid and the caller must not
615 * own it already.
616 *
617 * @param pDevExt The device extension.
618 * @param pSession The session. Can be NULL at unload.
619 */
620static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
621{
622 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
623 if ( pDevExt->MemBalloon.pOwner == pSession
624 || pSession == NULL /*unload*/)
625 {
626 if (pDevExt->MemBalloon.paMemObj)
627 {
628 VMMDevChangeMemBalloon *pReq;
629 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
630 if (RT_SUCCESS(rc))
631 {
632 uint32_t i;
633 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
634 {
635 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
636 if (RT_FAILURE(rc))
637 {
638 LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
639 rc, pDevExt->MemBalloon.cChunks));
640 break;
641 }
642 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
643 pDevExt->MemBalloon.cChunks--;
644 }
645 VbglGRFree(&pReq->header);
646 }
647 else
648 LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
649 rc, pDevExt->MemBalloon.cChunks));
650 RTMemFree(pDevExt->MemBalloon.paMemObj);
651 pDevExt->MemBalloon.paMemObj = NULL;
652 }
653
654 pDevExt->MemBalloon.pOwner = NULL;
655 }
656 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
657}
658
659
660/**
661 * Initializes the VBoxGuest device extension when the
662 * device driver is loaded.
663 *
664 * The native code locates the VMMDev on the PCI bus and retrieves
665 * the MMIO and I/O port ranges; this function will take care of
666 * mapping the MMIO memory (if present). Upon successful return
667 * the native code should set up the interrupt handler.
668 *
669 * @returns VBox status code.
670 *
671 * @param pDevExt The device extension. Allocated by the native code.
672 * @param IOPortBase The base of the I/O port range.
673 * @param pvMMIOBase The base of the MMIO memory mapping.
674 * This is optional, pass NULL if not present.
675 * @param cbMMIO The size of the MMIO memory mapping.
676 * This is optional, pass 0 if not present.
677 * @param enmOSType The guest OS type to report to the VMMDev.
678 * @param fFixedEvents Events that will be enabled upon init and no client
679 * will ever be allowed to mask.
680 */
681int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
682 void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
683{
684 int rc, rc2;
685 unsigned i;
686
687 /*
688 * Adjust fFixedEvents.
689 */
690#ifdef VBOX_WITH_HGCM
691 fFixedEvents |= VMMDEV_EVENT_HGCM;
692#endif
693
694 /*
695 * Initialize the data.
696 */
697 pDevExt->IOPortBase = IOPortBase;
698 pDevExt->pVMMDevMemory = NULL;
699 pDevExt->fFixedEvents = fFixedEvents;
700 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
701 pDevExt->EventSpinlock = NIL_RTSPINLOCK;
702 pDevExt->pIrqAckEvents = NULL;
703 pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
704 RTListInit(&pDevExt->WaitList);
705#ifdef VBOX_WITH_HGCM
706 RTListInit(&pDevExt->HGCMWaitList);
707#endif
708#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
709 RTListInit(&pDevExt->WakeUpList);
710#endif
711 RTListInit(&pDevExt->WokenUpList);
712 RTListInit(&pDevExt->FreeList);
713#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
714 pDevExt->fVRDPEnabled = false;
715#endif
716 pDevExt->fLoggingEnabled = false;
717 pDevExt->f32PendingEvents = 0;
718 pDevExt->u32MousePosChangedSeq = 0;
719 pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
720 pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
721 pDevExt->MemBalloon.cChunks = 0;
722 pDevExt->MemBalloon.cMaxChunks = 0;
723 pDevExt->MemBalloon.fUseKernelAPI = true;
724 pDevExt->MemBalloon.paMemObj = NULL;
725 pDevExt->MemBalloon.pOwner = NULL;
726 for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
727 pDevExt->acMouseFeatureUsage[i] = 0;
728 pDevExt->fMouseStatus = 0;
729 pDevExt->MouseNotifyCallback.pfnNotify = NULL;
730 pDevExt->MouseNotifyCallback.pvUser = NULL;
731 pDevExt->cISR = 0;
732
733 /*
734 * If there is an MMIO region, validate the version and size.
735 */
736 if (pvMMIOBase)
737 {
738 VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
739 Assert(cbMMIO);
740 if ( pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
741 && pVMMDev->u32Size >= 32
742 && pVMMDev->u32Size <= cbMMIO)
743 {
744 pDevExt->pVMMDevMemory = pVMMDev;
745 Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
746 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
747 }
748 else /* try live without it. */
749 LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
750 pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
751 }
752
753 /*
754 * Create the wait and session spinlocks as well as the ballooning mutex.
755 */
756 rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
757 if (RT_SUCCESS(rc))
758 rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
759 if (RT_FAILURE(rc))
760 {
761 LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
762 if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
763 RTSpinlockDestroy(pDevExt->EventSpinlock);
764 return rc;
765 }
766
767 rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
768 if (RT_FAILURE(rc))
769 {
770 LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
771 RTSpinlockDestroy(pDevExt->SessionSpinlock);
772 RTSpinlockDestroy(pDevExt->EventSpinlock);
773 return rc;
774 }
775
776 /*
777 * Initialize the guest library and report the guest info back to VMMDev,
778 * set the interrupt control filter mask, and fixate the guest mappings
779 * made by the VMM.
780 */
781 rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
782 if (RT_SUCCESS(rc))
783 {
784 rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
785 if (RT_SUCCESS(rc))
786 {
787 pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
788 Assert(pDevExt->PhysIrqAckEvents != 0);
789
790 rc = VBoxGuestReportGuestInfo(enmOSType);
791 if (RT_SUCCESS(rc))
792 {
793 rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
794 if (RT_SUCCESS(rc))
795 {
796 /*
797 * Disable guest graphics capability by default. The guest specific
798 * graphics driver will re-enable this when it is necessary.
799 */
800 rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
801 if (RT_SUCCESS(rc))
802 {
803 vboxGuestInitFixateGuestMappings(pDevExt);
804
805#ifdef DEBUG
806 testSetMouseStatus(); /* Other tests? */
807#endif
808
809 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
810 if (RT_FAILURE(rc))
811 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));
812
813 Log(("VBoxGuestInitDevExt: returns success\n"));
814 return VINF_SUCCESS;
815 }
816
817 LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
818 }
819 else
820 LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
821 }
822 else
823 LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
824 VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
825 }
826 else
827 LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));
828
829 VbglTerminate();
830 }
831 else
832 LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));
833
834 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
835 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
836 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
837 return rc; /* (failed) */
838}
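/*
 * Illustrative sketch, not built: the sequence a native port is expected to
 * follow around VBoxGuestInitDevExt. nativeMapMmio and nativeInstallIrqHandler
 * are hypothetical placeholders for the OS specific PCI/IRQ plumbing; the OS
 * type is just an example.
 */
#if 0
    uint32_t cbMMIO     = 0;
    void    *pvMMIOBase = nativeMapMmio(pPciDev, &cbMMIO);   /* optional, may be NULL / 0 */
    int rc = VBoxGuestInitDevExt(&g_DevExt, uIOPortBase, pvMMIOBase, cbMMIO,
                                 VBOXOSTYPE_Linux26, 0 /* fFixedEvents */);
    if (RT_SUCCESS(rc))
    {
        rc = nativeInstallIrqHandler(pPciDev, &g_DevExt);     /* the handler forwards to the common ISR */
        if (RT_FAILURE(rc))
            VBoxGuestDeleteDevExt(&g_DevExt);
    }
#endif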
839
840
841/**
842 * Deletes all the items in a wait chain.
843 * @param pList The head of the chain.
844 */
845static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
846{
847 while (!RTListIsEmpty(pList))
848 {
849 int rc2;
850 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
851 RTListNodeRemove(&pWait->ListNode);
852
853 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
854 pWait->Event = NIL_RTSEMEVENTMULTI;
855 pWait->pSession = NULL;
856 RTMemFree(pWait);
857 }
858}
859
860
861/**
862 * Destroys the VBoxGuest device extension.
863 *
864 * The native code should call this before the driver is unloaded,
865 * but don't call this on shutdown.
866 *
867 * @param pDevExt The device extension.
868 */
869void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
870{
871 int rc2;
872 Log(("VBoxGuestDeleteDevExt:\n"));
873 Log(("VBoxGuest: The additions driver is terminating.\n"));
874
875 /*
876 * Clean up the bits that involve the host first.
877 */
878 vboxGuestTermUnfixGuestMappings(pDevExt);
879 VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
880 vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
881 vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);
882
883 /*
884 * Cleanup all the other resources.
885 */
886 rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
887 rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
888 rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
889
890 VBoxGuestDeleteWaitList(&pDevExt->WaitList);
891#ifdef VBOX_WITH_HGCM
892 VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
893#endif
894#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
895 VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
896#endif
897 VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
898 VBoxGuestDeleteWaitList(&pDevExt->FreeList);
899
900 VbglTerminate();
901
902 pDevExt->pVMMDevMemory = NULL;
903
904 pDevExt->IOPortBase = 0;
905 pDevExt->pIrqAckEvents = NULL;
906}
907
908
909/**
910 * Creates a VBoxGuest user session.
911 *
912 * The native code calls this when a ring-3 client opens the device.
913 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
914 *
915 * @returns VBox status code.
916 * @param pDevExt The device extension.
917 * @param ppSession Where to store the session on success.
918 */
919int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
920{
921 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
922 if (RT_UNLIKELY(!pSession))
923 {
924 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
925 return VERR_NO_MEMORY;
926 }
927
928 pSession->Process = RTProcSelf();
929 pSession->R0Process = RTR0ProcHandleSelf();
930 pSession->pDevExt = pDevExt;
931
932 *ppSession = pSession;
933 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
934 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
935 return VINF_SUCCESS;
936}
937
938
939/**
940 * Creates a VBoxGuest kernel session.
941 *
942 * The native code calls this when a ring-0 client connects to the device.
943 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
944 *
945 * @returns VBox status code.
946 * @param pDevExt The device extension.
947 * @param ppSession Where to store the session on success.
948 */
949int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
950{
951 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
952 if (RT_UNLIKELY(!pSession))
953 {
954 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
955 return VERR_NO_MEMORY;
956 }
957
958 pSession->Process = NIL_RTPROCESS;
959 pSession->R0Process = NIL_RTR0PROCESS;
960 pSession->pDevExt = pDevExt;
961
962 *ppSession = pSession;
963 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
964 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
965 return VINF_SUCCESS;
966}
967
968
969
970/**
971 * Closes a VBoxGuest session.
972 *
973 * @param pDevExt The device extension.
974 * @param pSession The session to close (and free).
975 */
976void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
977{
978 unsigned i; NOREF(i);
979 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
980 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
981
982#ifdef VBOX_WITH_HGCM
983 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
984 if (pSession->aHGCMClientIds[i])
985 {
986 VBoxGuestHGCMDisconnectInfo Info;
987 Info.result = 0;
988 Info.u32ClientID = pSession->aHGCMClientIds[i];
989 pSession->aHGCMClientIds[i] = 0;
990 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
991 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
992 }
993#endif
994
995 pSession->pDevExt = NULL;
996 pSession->Process = NIL_RTPROCESS;
997 pSession->R0Process = NIL_RTR0PROCESS;
998 vboxGuestCloseMemBalloon(pDevExt, pSession);
999 /* Reset any mouse status flags which the session may have set. */
1000 VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
1001 RTMemFree(pSession);
1002}
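/*
 * Illustrative sketch, not built: the session life cycle as driven by a native
 * open/close entry point for ring-3 clients. The per-handle storage field is a
 * hypothetical placeholder.
 */
#if 0
    /* open: */
    PVBOXGUESTSESSION pSession;
    int rc = VBoxGuestCreateUserSession(&g_DevExt, &pSession);
    if (RT_SUCCESS(rc))
        pFile->pvSession = pSession;      /* stash it with the native file handle */

    /* close: */
    VBoxGuestCloseSession(&g_DevExt, (PVBOXGUESTSESSION)pFile->pvSession);
    pFile->pvSession = NULL;
#endif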
1003
1004
1005/**
1006 * Allocates a wait-for-event entry.
1007 *
1008 * @returns The wait-for-event entry.
1009 * @param pDevExt The device extension.
1010 * @param pSession The session that's allocating this. Can be NULL.
1011 */
1012static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1013{
1014 /*
1015 * Allocate it one way or the other.
1016 */
1017 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1018 if (pWait)
1019 {
1020 RTSpinlockAcquire(pDevExt->EventSpinlock);
1021
1022 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1023 if (pWait)
1024 RTListNodeRemove(&pWait->ListNode);
1025
1026 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1027 }
1028 if (!pWait)
1029 {
1030 static unsigned s_cErrors = 0;
1031 int rc;
1032
1033 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1034 if (!pWait)
1035 {
1036 if (s_cErrors++ < 32)
1037 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1038 return NULL;
1039 }
1040
1041 rc = RTSemEventMultiCreate(&pWait->Event);
1042 if (RT_FAILURE(rc))
1043 {
1044 if (s_cErrors++ < 32)
1045 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1046 RTMemFree(pWait);
1047 return NULL;
1048 }
1049
1050 pWait->ListNode.pNext = NULL;
1051 pWait->ListNode.pPrev = NULL;
1052 }
1053
1054 /*
1055 * Zero members just as a precaution.
1056 */
1057 pWait->fReqEvents = 0;
1058 pWait->fResEvents = 0;
1059#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1060 pWait->fPendingWakeUp = false;
1061 pWait->fFreeMe = false;
1062#endif
1063 pWait->pSession = pSession;
1064#ifdef VBOX_WITH_HGCM
1065 pWait->pHGCMReq = NULL;
1066#endif
1067 RTSemEventMultiReset(pWait->Event);
1068 return pWait;
1069}
1070
1071
1072/**
1073 * Frees the wait-for-event entry.
1074 *
1075 * The caller must own the wait spinlock!
1076 * The entry must be in a list!
1077 *
1078 * @param pDevExt The device extension.
1079 * @param pWait The wait-for-event entry to free.
1080 */
1081static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1082{
1083 pWait->fReqEvents = 0;
1084 pWait->fResEvents = 0;
1085#ifdef VBOX_WITH_HGCM
1086 pWait->pHGCMReq = NULL;
1087#endif
1088#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1089 Assert(!pWait->fFreeMe);
1090 if (pWait->fPendingWakeUp)
1091 pWait->fFreeMe = true;
1092 else
1093#endif
1094 {
1095 RTListNodeRemove(&pWait->ListNode);
1096 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1097 }
1098}
1099
1100
1101/**
1102 * Frees the wait-for-event entry.
1103 *
1104 * @param pDevExt The device extension.
1105 * @param pWait The wait-for-event entry to free.
1106 */
1107static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1108{
1109 RTSpinlockAcquire(pDevExt->EventSpinlock);
1110 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1111 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1112}
1113
1114
1115#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1116/**
1117 * Processes the wake-up list.
1118 *
1119 * All entries in the wake-up list get signalled and moved to the woken-up
1120 * list.
1121 *
1122 * @param pDevExt The device extension.
1123 */
1124void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
1125{
1126 if (!RTListIsEmpty(&pDevExt->WakeUpList))
1127 {
1128 RTSpinlockAcquire(pDevExt->EventSpinlock);
1129 for (;;)
1130 {
1131 int rc;
1132 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
1133 if (!pWait)
1134 break;
1135 pWait->fPendingWakeUp = true;
1136 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1137
1138 rc = RTSemEventMultiSignal(pWait->Event);
1139 AssertRC(rc);
1140
1141 RTSpinlockAcquire(pDevExt->EventSpinlock);
1142 pWait->fPendingWakeUp = false;
1143 if (!pWait->fFreeMe)
1144 {
1145 RTListNodeRemove(&pWait->ListNode);
1146 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1147 }
1148 else
1149 {
1150 pWait->fFreeMe = false;
1151 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1152 }
1153 }
1154 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1155 }
1156}
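/*
 * Illustrative sketch, not built: on platforms where the ISR must not call
 * RTSemEventMultiSignal, the interrupt handler only moves matching waiters to
 * WakeUpList and a deferred context drains it afterwards. The DPC/tasklet hook
 * name is a hypothetical placeholder.
 */
# if 0
static void vboxGuestExampleDeferredWakeUp(PVBOXGUESTDEVEXT pDevExt)
{
    /* Called from a DPC / tasklet / soft interrupt scheduled by the ISR. */
    VBoxGuestWaitDoWakeUps(pDevExt);   /* signals the events and moves the entries to WokenUpList */
}
# endif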
1157#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1158
1159
1160/**
1161 * Modifies the guest capabilities.
1162 *
1163 * Should be called during driver init and termination.
1164 *
1165 * @returns VBox status code.
1166 * @param fOr The Or mask (what to enable).
1167 * @param fNot The Not mask (what to disable).
1168 */
1169int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1170{
1171 VMMDevReqGuestCapabilities2 *pReq;
1172 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1173 if (RT_FAILURE(rc))
1174 {
1175 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1176 sizeof(*pReq), sizeof(*pReq), rc));
1177 return rc;
1178 }
1179
1180 pReq->u32OrMask = fOr;
1181 pReq->u32NotMask = fNot;
1182
1183 rc = VbglGRPerform(&pReq->header);
1184 if (RT_FAILURE(rc))
1185 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1186
1187 VbglGRFree(&pReq->header);
1188 return rc;
1189}
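/*
 * Illustrative sketch, not built: the or/not semantics of
 * VBoxGuestSetGuestCapabilities as used elsewhere in this file -- a graphics
 * capable component announces the capability, and termination clears
 * everything again.
 */
#if 0
    rc = VBoxGuestSetGuestCapabilities(VMMDEV_GUEST_SUPPORTS_GRAPHICS, 0); /* set the graphics bit */
    /* ... later, e.g. in VBoxGuestDeleteDevExt: */
    rc = VBoxGuestSetGuestCapabilities(0, UINT32_MAX);                     /* clear all capabilities */
#endif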
1190
1191
1192/**
1193 * Implements the fast (no input or output) type of IOCtls.
1194 *
1195 * This is currently just a placeholder stub inherited from the support driver code.
1196 *
1197 * @returns VBox status code.
1198 * @param iFunction The IOCtl function number.
1199 * @param pDevExt The device extension.
1200 * @param pSession The session.
1201 */
1202int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1203{
1204 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1205
1206 NOREF(iFunction);
1207 NOREF(pDevExt);
1208 NOREF(pSession);
1209 return VERR_NOT_SUPPORTED;
1210}
1211
1212
1213/**
1214 * Return the VMM device port.
1215 *
1216 * @returns IPRT status code.
1217 * @param pDevExt The device extension.
1218 * @param pInfo The request info.
1219 * @param pcbDataReturned (out) contains the number of bytes to return.
1220 */
1221static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1222{
1223 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1224 pInfo->portAddress = pDevExt->IOPortBase;
1225 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1226 if (pcbDataReturned)
1227 *pcbDataReturned = sizeof(*pInfo);
1228 return VINF_SUCCESS;
1229}
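/*
 * Illustrative sketch, not built: what the GETVMMDEVPORT request hands back to
 * a kernel-mode client through the common I/O control dispatcher.
 */
#if 0
    VBoxGuestPortInfo PortInfo;
    size_t            cbRet = 0;
    rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, &PortInfo, &cbRet);
    /* PortInfo.portAddress   - the VMMDev I/O port base for issuing requests.
       PortInfo.pVMMDevMemory - the VMMDev MMIO mapping, or NULL if not present. */
#endif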
1230
1231
1232#ifndef RT_OS_WINDOWS
1233/**
1234 * Set the callback for the kernel mouse handler.
1235 *
1236 * @returns IPRT status code.
1237 * @param pDevExt The device extension.
1238 * @param pNotify The new callback information.
1239 * @note This function takes the session spinlock to update the callback
1240 * information, but the interrupt handler will not do this. To make
1241 * sure that the interrupt handler sees a consistent structure, we
1242 * set the function pointer to NULL before updating the data and only
1243 * set it to the correct value once the data is updated. Since the
1244 * interrupt handler executes atomically this ensures that the data is
1245 * valid if the function pointer is non-NULL.
1246 */
1247int VBoxGuestCommonIOCtl_SetMouseNotifyCallback(PVBOXGUESTDEVEXT pDevExt, VBoxGuestMouseSetNotifyCallback *pNotify)
1248{
1249 Log(("VBoxGuestCommonIOCtl: SET_MOUSE_NOTIFY_CALLBACK\n"));
1250
1251 RTSpinlockAcquire(pDevExt->EventSpinlock);
1252 pDevExt->MouseNotifyCallback = *pNotify;
1253 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1254
1255 /* Make sure no active ISR is referencing the old data - hacky but should be
1256 * effective. */
1257 while (pDevExt->cISR > 0)
1258 ASMNopPause();
1259
1260 return VINF_SUCCESS;
1261}
1262#endif
1263
1264
1265/**
1266 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
1267 *
1268 * The caller enters the spinlock, we leave it.
1269 *
1270 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1271 */
1272DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1273 int iEvent, const uint32_t fReqEvents)
1274{
1275 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1276 if (fMatches)
1277 {
1278 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1279 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1280
1281 pInfo->u32EventFlagsOut = fMatches;
1282 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1283 if (fReqEvents & ~((uint32_t)1 << iEvent))
1284 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1285 else
1286 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1287 return VINF_SUCCESS;
1288 }
1289 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1290 return VERR_TIMEOUT;
1291}
1292
1293
1294static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1295 VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
1296{
1297 const uint32_t fReqEvents = pInfo->u32EventMaskIn;
1298 uint32_t fResEvents;
1299 int iEvent;
1300 PVBOXGUESTWAIT pWait;
1301 int rc;
1302
1303 pInfo->u32EventFlagsOut = 0;
1304 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1305 if (pcbDataReturned)
1306 *pcbDataReturned = sizeof(*pInfo);
1307
1308 /*
1309 * Copy and verify the input mask.
1310 */
1311 iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
1312 if (RT_UNLIKELY(iEvent < 0))
1313 {
1314 Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
1315 return VERR_INVALID_PARAMETER;
1316 }
1317
1318 /*
1319 * Check the condition up front, before doing the wait-for-event allocations.
1320 */
1321 RTSpinlockAcquire(pDevExt->EventSpinlock);
1322 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents);
1323 if (rc == VINF_SUCCESS)
1324 return rc;
1325
1326 if (!pInfo->u32TimeoutIn)
1327 {
1328 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1329 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
1330 return VERR_TIMEOUT;
1331 }
1332
1333 pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
1334 if (!pWait)
1335 return VERR_NO_MEMORY;
1336 pWait->fReqEvents = fReqEvents;
1337
1338 /*
1339 * We've got the wait entry now, re-enter the spinlock and check for the condition.
1340 * If the wait condition is met, return.
1341 * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
1342 */
1343 RTSpinlockAcquire(pDevExt->EventSpinlock);
1344 RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
1345 rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents);
1346 if (rc == VINF_SUCCESS)
1347 {
1348 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1349 return rc;
1350 }
1351
1352 if (fInterruptible)
1353 rc = RTSemEventMultiWaitNoResume(pWait->Event,
1354 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1355 else
1356 rc = RTSemEventMultiWait(pWait->Event,
1357 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
1358
1359 /*
1360 * There is one special case here and that's when the semaphore is
1361 * destroyed upon device driver unload. This shouldn't happen of course,
1362 * but in case it does, just get out of here ASAP.
1363 */
1364 if (rc == VERR_SEM_DESTROYED)
1365 return rc;
1366
1367 /*
1368 * Unlink the wait item and dispose of it.
1369 */
1370 RTSpinlockAcquire(pDevExt->EventSpinlock);
1371 fResEvents = pWait->fResEvents;
1372 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1373 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1374
1375 /*
1376 * Now deal with the return code.
1377 */
1378 if ( fResEvents
1379 && fResEvents != UINT32_MAX)
1380 {
1381 pInfo->u32EventFlagsOut = fResEvents;
1382 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1383 if (fReqEvents & ~((uint32_t)1 << iEvent))
1384 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1385 else
1386 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1387 rc = VINF_SUCCESS;
1388 }
1389 else if ( fResEvents == UINT32_MAX
1390 || rc == VERR_INTERRUPTED)
1391 {
1392 pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
1393 rc = VERR_INTERRUPTED;
1394 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
1395 }
1396 else if (rc == VERR_TIMEOUT)
1397 {
1398 pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
1399 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
1400 }
1401 else
1402 {
1403 if (RT_SUCCESS(rc))
1404 {
1405 static unsigned s_cErrors = 0;
1406 if (s_cErrors++ < 32)
1407 LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
1408 rc = VERR_INTERNAL_ERROR;
1409 }
1410 pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
1411 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
1412 }
1413
1414 return rc;
1415}
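/*
 * Illustrative sketch, not built: the data a ring-3 caller of the wait-event
 * I/O control ends up passing to VBoxGuestCommonIOCtl_WaitEvent above. The
 * event bit is an example; the structure fields are the ones used above.
 */
#if 0
    VBoxGuestWaitEventInfo Info;
    Info.u32TimeoutIn     = 5000;                                /* milliseconds; UINT32_MAX waits forever */
    Info.u32EventMaskIn   = VMMDEV_EVENT_MOUSE_POSITION_CHANGED; /* which events to wait for */
    Info.u32Result        = VBOXGUEST_WAITEVENT_ERROR;
    Info.u32EventFlagsOut = 0;
    /* The native ioctl path hands &Info to VBoxGuestCommonIOCtl_WaitEvent(); on
       success Info.u32EventFlagsOut holds the pending events that matched. */
#endif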
1416
1417
1418static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1419{
1420 PVBOXGUESTWAIT pWait;
1421 PVBOXGUESTWAIT pSafe;
1422 int rc = 0;
1423
1424 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1425
1426 /*
1427 * Walk the event list and wake up anyone with a matching session.
1428 */
1429 RTSpinlockAcquire(pDevExt->EventSpinlock);
1430 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
1431 {
1432 if (pWait->pSession == pSession)
1433 {
1434 pWait->fResEvents = UINT32_MAX;
1435 RTListNodeRemove(&pWait->ListNode);
1436#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1437 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
1438#else
1439 rc |= RTSemEventMultiSignal(pWait->Event);
1440 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
1441#endif
1442 }
1443 }
1444 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1445 Assert(rc == 0);
1446
1447#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1448 VBoxGuestWaitDoWakeUps(pDevExt);
1449#endif
1450
1451 return VINF_SUCCESS;
1452}
1453
1454/**
1455 * Checks if the VMM request is allowed in the context of the given session.
1456 *
1457 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1458 * @param pSession The calling session.
1459 * @param enmType The request type.
1460 * @param pReqHdr The request.
1461 */
1462static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1463 VMMDevRequestHeader const *pReqHdr)
1464{
1465 /*
1466 * Categorize the request being made.
1467 */
1468 /** @todo This needs quite some more work! */
1469 enum
1470 {
1471 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1472 } enmRequired;
1473 switch (enmType)
1474 {
1475 /*
1476 * Deny access to anything we don't know or provide specialized I/O controls for.
1477 */
1478#ifdef VBOX_WITH_HGCM
1479 case VMMDevReq_HGCMConnect:
1480 case VMMDevReq_HGCMDisconnect:
1481# ifdef VBOX_WITH_64_BITS_GUESTS
1482 case VMMDevReq_HGCMCall32:
1483 case VMMDevReq_HGCMCall64:
1484# else
1485 case VMMDevReq_HGCMCall:
1486# endif /* VBOX_WITH_64_BITS_GUESTS */
1487 case VMMDevReq_HGCMCancel:
1488 case VMMDevReq_HGCMCancel2:
1489#endif /* VBOX_WITH_HGCM */
1490 default:
1491 enmRequired = kLevel_NoOne;
1492 break;
1493
1494 /*
1495 * There are a few things only this driver can do (and it doesn't use
1496 * the VMMRequest I/O control route anyway, but whatever).
1497 */
1498 case VMMDevReq_ReportGuestInfo:
1499 case VMMDevReq_ReportGuestInfo2:
1500 case VMMDevReq_GetHypervisorInfo:
1501 case VMMDevReq_SetHypervisorInfo:
1502 case VMMDevReq_RegisterPatchMemory:
1503 case VMMDevReq_DeregisterPatchMemory:
1504 case VMMDevReq_GetMemBalloonChangeRequest:
1505 enmRequired = kLevel_OnlyVBoxGuest;
1506 break;
1507
1508 /*
1509 * Trusted user applications only.
1510 */
1511 case VMMDevReq_QueryCredentials:
1512 case VMMDevReq_ReportCredentialsJudgement:
1513 case VMMDevReq_RegisterSharedModule:
1514 case VMMDevReq_UnregisterSharedModule:
1515 case VMMDevReq_WriteCoreDump:
1516 case VMMDevReq_GetCpuHotPlugRequest:
1517 case VMMDevReq_SetCpuHotPlugStatus:
1518 case VMMDevReq_CheckSharedModules:
1519 case VMMDevReq_GetPageSharingStatus:
1520 case VMMDevReq_DebugIsPageShared:
1521 case VMMDevReq_ReportGuestStats:
1522 case VMMDevReq_GetStatisticsChangeRequest:
1523 case VMMDevReq_ChangeMemBalloon:
1524 enmRequired = kLevel_TrustedUsers;
1525 break;
1526
1527 /*
1528 * Anyone.
1529 */
1530 case VMMDevReq_GetMouseStatus:
1531 case VMMDevReq_SetMouseStatus:
1532 case VMMDevReq_SetPointerShape:
1533 case VMMDevReq_GetHostVersion:
1534 case VMMDevReq_Idle:
1535 case VMMDevReq_GetHostTime:
1536 case VMMDevReq_SetPowerStatus:
1537 case VMMDevReq_AcknowledgeEvents:
1538 case VMMDevReq_CtlGuestFilterMask:
1539 case VMMDevReq_ReportGuestCapabilities:
1540 case VMMDevReq_GetDisplayChangeRequest:
1541 case VMMDevReq_VideoModeSupported:
1542 case VMMDevReq_GetHeightReduction:
1543 case VMMDevReq_GetDisplayChangeRequest2:
1544 case VMMDevReq_SetGuestCapabilities:
1545 case VMMDevReq_VideoModeSupported2:
1546 case VMMDevReq_VideoAccelEnable:
1547 case VMMDevReq_VideoAccelFlush:
1548 case VMMDevReq_VideoSetVisibleRegion:
1549 case VMMDevReq_GetSeamlessChangeRequest:
1550 case VMMDevReq_GetVRDPChangeRequest:
1551 case VMMDevReq_LogString:
1552 case VMMDevReq_GetSessionId:
1553 enmRequired = kLevel_AllUsers;
1554 break;
1555
1556 /*
1557 * Depends on the request parameters...
1558 */
1559 /** @todo this has to be changed into an I/O control and the facilities
1560 * tracked in the session so they can automatically be failed when the
1561 * session terminates without reporting the new status.
1562 *
1563 * The information presented by IGuest is not reliable without this! */
1564 case VMMDevReq_ReportGuestStatus:
1565 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1566 {
1567 case VBoxGuestFacilityType_All:
1568 case VBoxGuestFacilityType_VBoxGuestDriver:
1569 enmRequired = kLevel_OnlyVBoxGuest;
1570 break;
1571 case VBoxGuestFacilityType_VBoxService:
1572 enmRequired = kLevel_TrustedUsers;
1573 break;
1574 case VBoxGuestFacilityType_VBoxTrayClient:
1575 case VBoxGuestFacilityType_Seamless:
1576 case VBoxGuestFacilityType_Graphics:
1577 default:
1578 enmRequired = kLevel_AllUsers;
1579 break;
1580 }
1581 break;
1582 }
1583
1584 /*
1585 * Check against the session.
1586 */
1587 switch (enmRequired)
1588 {
1589 default:
1590 case kLevel_NoOne:
1591 break;
1592 case kLevel_OnlyVBoxGuest:
1593 case kLevel_OnlyKernel:
1594 if (pSession->R0Process == NIL_RTR0PROCESS)
1595 return VINF_SUCCESS;
1596 break;
1597 case kLevel_TrustedUsers:
1598 case kLevel_AllUsers:
1599 return VINF_SUCCESS;
1600 }
1601
1602 return VERR_PERMISSION_DENIED;
1603}
1604
1605static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1606 VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
1607{
1608 int rc;
1609 VMMDevRequestHeader *pReqCopy;
1610
1611 /*
1612 * Validate the header and request size.
1613 */
1614 const VMMDevRequestType enmType = pReqHdr->requestType;
1615 const uint32_t cbReq = pReqHdr->size;
1616 const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
1617
1618 Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));
1619
1620 if (cbReq < cbMinSize)
1621 {
1622 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
1623 cbReq, cbMinSize, enmType));
1624 return VERR_INVALID_PARAMETER;
1625 }
1626 if (cbReq > cbData)
1627 {
1628 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
1629 cbData, cbReq, enmType));
1630 return VERR_INVALID_PARAMETER;
1631 }
1632 rc = VbglGRVerify(pReqHdr, cbData);
1633 if (RT_FAILURE(rc))
1634 {
1635 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
1636 cbData, cbReq, enmType, rc));
1637 return rc;
1638 }
1639
1640 rc = VBoxGuestCheckIfVMMReqAllowed(pSession, enmType, pReqHdr);
1641 if (RT_FAILURE(rc))
1642 {
1643 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
1644 return rc;
1645 }
1646
1647 /*
1648 * Make a copy of the request in the physical memory heap so
1649 * the VBoxGuestLibrary can more easily deal with the request.
1650 * (This is really a waste of time since the OS or the OS specific
1651 * code has already buffered or locked the input/output buffer, but
1652 * it does make things a bit simpler wrt the phys address.)
1653 */
1654 rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
1655 if (RT_FAILURE(rc))
1656 {
1657 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1658 cbReq, cbReq, rc));
1659 return rc;
1660 }
1661 memcpy(pReqCopy, pReqHdr, cbReq);
1662
1663 if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
1664 pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);
1665
1666 rc = VbglGRPerform(pReqCopy);
1667 if ( RT_SUCCESS(rc)
1668 && RT_SUCCESS(pReqCopy->rc))
1669 {
1670 Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
1671 Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);
1672
1673 memcpy(pReqHdr, pReqCopy, cbReq);
1674 if (pcbDataReturned)
1675 *pcbDataReturned = cbReq;
1676 }
1677 else if (RT_FAILURE(rc))
1678 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
1679 else
1680 {
1681 Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
1682 rc = pReqCopy->rc;
1683 }
1684
1685 VbglGRFree(pReqCopy);
1686 return rc;
1687}
1688
1689
1690static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1691{
1692 VMMDevCtlGuestFilterMask *pReq;
1693 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1694 if (RT_FAILURE(rc))
1695 {
1696 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1697 sizeof(*pReq), sizeof(*pReq), rc));
1698 return rc;
1699 }
1700
1701 pReq->u32OrMask = pInfo->u32OrMask;
1702 pReq->u32NotMask = pInfo->u32NotMask;
1703 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1704 rc = VbglGRPerform(&pReq->header);
1705 if (RT_FAILURE(rc))
1706 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1707
1708 VbglGRFree(&pReq->header);
1709 return rc;
1710}
1711
1712#ifdef VBOX_WITH_HGCM
1713
1714AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1715
1716/** Worker for VBoxGuestHGCMAsyncWaitCallback*. */
1717static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
1718 bool fInterruptible, uint32_t cMillies)
1719{
1720 int rc;
1721
1722 /*
1723 * Check to see if the condition was met by the time we got here.
1724 *
1725 * We create a simple poll loop here for dealing with out-of-memory
1726 * conditions since the caller isn't necessarily able to deal with
1727 * us returning too early.
1728 */
1729 PVBOXGUESTWAIT pWait;
1730 for (;;)
1731 {
1732 RTSpinlockAcquire(pDevExt->EventSpinlock);
1733 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1734 {
1735 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1736 return VINF_SUCCESS;
1737 }
1738 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1739
1740 pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
1741 if (pWait)
1742 break;
1743 if (fInterruptible)
1744 return VERR_INTERRUPTED;
1745 RTThreadSleep(1);
1746 }
1747 pWait->fReqEvents = VMMDEV_EVENT_HGCM;
1748 pWait->pHGCMReq = pHdr;
1749
1750 /*
1751 * Re-enter the spinlock and re-check for the condition.
1752 * If the condition is met, return.
1753 * Otherwise link us into the HGCM wait list and go to sleep.
1754 */
1755 RTSpinlockAcquire(pDevExt->EventSpinlock);
1756 RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
1757 if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
1758 {
1759 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1760 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1761 return VINF_SUCCESS;
1762 }
1763 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1764
1765 if (fInterruptible)
1766 rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
1767 else
1768 rc = RTSemEventMultiWait(pWait->Event, cMillies);
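    /* If the semaphore was destroyed the driver is presumably being torn down; leave the wait entry alone and get out. */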
1769 if (rc == VERR_SEM_DESTROYED)
1770 return rc;
1771
1772 /*
1773 * Unlink, free and return.
1774 */
1775 if ( RT_FAILURE(rc)
1776 && rc != VERR_TIMEOUT
1777 && ( !fInterruptible
1778 || rc != VERR_INTERRUPTED))
1779 LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));
1780
1781 VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
1782 return rc;
1783}
1784
1785
1786/**
1787 * This is a callback for dealing with async waits.
1788 *
1789 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1790 */
1791static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1792{
1793 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1794 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1795 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1796 pDevExt,
1797 false /* fInterruptible */,
1798 u32User /* cMillies */);
1799}
1800
1801
1802/**
1803 * This is a callback for dealing with interruptible async waits.
1804 *
1805 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1806 */
1807static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1808 void *pvUser, uint32_t u32User)
1809{
1810 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1811 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1812 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1813 pDevExt,
1814 true /* fInterruptible */,
1815 u32User /* cMillies */ );
1816
1817}
1818
1819
1820static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1821 VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
1822{
1823 int rc;
1824
1825 /*
1826 * The VbglHGCMConnect call will invoke the callback if the HGCM
1827 * call is performed in an ASYNC fashion. The function is not able
1828 * to deal with cancelled requests.
1829 */
1830 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
1831 pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
1832 ? pInfo->Loc.u.host.achName : "<not local host>"));
1833
1834 rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1835 if (RT_SUCCESS(rc))
1836 {
1837 Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
1838 pInfo->u32ClientID, pInfo->result, rc));
1839 if (RT_SUCCESS(pInfo->result))
1840 {
1841 /*
1842 * Append the client id to the client id table.
1843 * If the table has somehow become filled up, we'll disconnect the session.
1844 */
1845 unsigned i;
1846 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1847 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1848 if (!pSession->aHGCMClientIds[i])
1849 {
1850 pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
1851 break;
1852 }
1853 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1854 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1855 {
1856 static unsigned s_cErrors = 0;
1857 VBoxGuestHGCMDisconnectInfo Info;
1858
1859 if (s_cErrors++ < 32)
1860 LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));
1861
1862 Info.result = 0;
1863 Info.u32ClientID = pInfo->u32ClientID;
1864 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1865 return VERR_TOO_MANY_OPEN_FILES;
1866 }
1867 }
1868 if (pcbDataReturned)
1869 *pcbDataReturned = sizeof(*pInfo);
1870 }
1871 return rc;
1872}
1873
1874
1875static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1876 size_t *pcbDataReturned)
1877{
1878 /*
1879 * Validate the client id and invalidate its entry while we're in the call.
1880 */
1881 int rc;
1882 const uint32_t u32ClientId = pInfo->u32ClientID;
1883 unsigned i;
1884 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1885 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1886 if (pSession->aHGCMClientIds[i] == u32ClientId)
1887 {
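            /* Park the slot as UINT32_MAX so a concurrent connect cannot reuse it while the disconnect is in flight; it is cleared or restored below depending on the outcome. */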
1888 pSession->aHGCMClientIds[i] = UINT32_MAX;
1889 break;
1890 }
1891 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1892 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1893 {
1894 static unsigned s_cErrors = 0;
1895 if (s_cErrors++ < 32)
1896 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: Invalid client id %RX32\n", u32ClientId));
1897 return VERR_INVALID_HANDLE;
1898 }
1899
1900 /*
1901 * The VbglHGCMDisconnect call will invoke the callback if the HGCM
1902 * call is performed in an ASYNC fashion. The function is not able
1903 * to deal with cancelled requests.
1904 */
1905 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1906 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1907 if (RT_SUCCESS(rc))
1908 {
1909 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1910 if (pcbDataReturned)
1911 *pcbDataReturned = sizeof(*pInfo);
1912 }
1913
1914 /* Update the client id array according to the result. */
1915 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1916 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1917 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1918 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1919
1920 return rc;
1921}
1922
1923
1924static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1925 PVBOXGUESTSESSION pSession,
1926 VBoxGuestHGCMCallInfo *pInfo,
1927 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
1928 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1929{
1930 const uint32_t u32ClientId = pInfo->u32ClientID;
1931 uint32_t fFlags;
1932 size_t cbActual;
1933 unsigned i;
1934 int rc;
1935
1936 /*
1937 * Some more validations.
1938 */
1939 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1940 {
1941 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1942 return VERR_INVALID_PARAMETER;
1943 }
1944
1945 cbActual = cbExtra + sizeof(*pInfo);
1946#ifdef RT_ARCH_AMD64
1947 if (f32bit)
1948 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1949 else
1950#endif
1951 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1952 if (cbData < cbActual)
1953 {
1954 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1955 cbData, cbData, cbActual, cbActual));
1956 return VERR_INVALID_PARAMETER;
1957 }
1958
1959 /*
1960 * Validate the client id.
1961 */
1962 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1963 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1964 if (pSession->aHGCMClientIds[i] == u32ClientId)
1965 break;
1966 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1967 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1968 {
1969 static unsigned s_cErrors = 0;
1970 if (s_cErrors++ < 32)
1971 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1972 return VERR_INVALID_HANDLE;
1973 }
1974
1975 /*
1976 * The VbglHGCMCall call will invoke the callback if the HGCM
1977 * call is performed in an ASYNC fashion. This function can
1978 * deal with cancelled requests, so we let user mode requests
1979 * be interruptible (should add a flag for this later I guess).
1980 */
1981 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
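    /* Only a ring-0 session passing kernel buffers may use kernel addressing; everything else must be treated as user memory by the VBGL layer. */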
1982 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1983#ifdef RT_ARCH_AMD64
1984 if (f32bit)
1985 {
1986 if (fInterruptible)
1987 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1988 else
1989 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1990 }
1991 else
1992#endif
1993 {
1994 if (fInterruptible)
1995 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1996 else
1997 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1998 }
1999 if (RT_SUCCESS(rc))
2000 {
2001 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
2002 if (pcbDataReturned)
2003 *pcbDataReturned = cbActual;
2004 }
2005 else
2006 {
2007 if ( rc != VERR_INTERRUPTED
2008 && rc != VERR_TIMEOUT)
2009 {
2010 static unsigned s_cErrors = 0;
2011 if (s_cErrors++ < 32)
2012 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2013 }
2014 else
2015 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
2016 }
2017 return rc;
2018}
2019
2020
2021#endif /* VBOX_WITH_HGCM */
2022
2023/**
2024 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
2025 *
2026 * Ask the host for the size of the balloon and try to set it accordingly. If
2027 * this approach fails because it's not supported, return with fHandleInR3 set
2028 * and let the user land supply memory we can lock via the other ioctl.
2029 *
2030 * @returns VBox status code.
2031 *
2032 * @param pDevExt The device extension.
2033 * @param pSession The session.
2034 * @param pInfo The output buffer.
2035 * @param pcbDataReturned Where to store the amount of returned data. Can
2036 * be NULL.
2037 */
2038static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2039 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2040{
2041 VMMDevGetMemBalloonChangeRequest *pReq;
2042 int rc;
2043
2044 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2045 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2046 AssertRCReturn(rc, rc);
2047
2048 /*
2049 * The first user trying to query/change the balloon becomes the
2050 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2051 */
2052 if ( pDevExt->MemBalloon.pOwner != pSession
2053 && pDevExt->MemBalloon.pOwner == NULL)
2054 pDevExt->MemBalloon.pOwner = pSession;
2055
2056 if (pDevExt->MemBalloon.pOwner == pSession)
2057 {
2058 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2059 if (RT_SUCCESS(rc))
2060 {
2061 /*
2062 * This is a response to the VMMDEV_EVENT_BALLOON_CHANGE_REQUEST event.
2063 * Setting eventAck means that we query the requested balloon size from
2064 * the host and adjust the guest memory balloon accordingly.
2065 */
2066 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2067 rc = VbglGRPerform(&pReq->header);
2068 if (RT_SUCCESS(rc))
2069 {
2070 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2071 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2072
2073 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2074 pInfo->fHandleInR3 = false;
2075
2076 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2077 /* Ignore various out of memory failures. */
2078 if ( rc == VERR_NO_MEMORY
2079 || rc == VERR_NO_PHYS_MEMORY
2080 || rc == VERR_NO_CONT_MEMORY)
2081 rc = VINF_SUCCESS;
2082
2083 if (pcbDataReturned)
2084 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2085 }
2086 else
2087 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2088 VbglGRFree(&pReq->header);
2089 }
2090 }
2091 else
2092 rc = VERR_PERMISSION_DENIED;
2093
2094 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2095 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2096 return rc;
2097}
2098
2099
2100/**
2101 * Handle a request for changing the memory balloon.
2102 *
2103 * @returns VBox status code.
2104 *
2105 * @param pDevExt The device extension.
2106 * @param pSession The session.
2107 * @param pInfo The change request structure (input).
2108 * @param pcbDataReturned Where to store the amount of returned data. Can
2109 * be NULL.
2110 */
2111static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2112 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2113{
2114 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2115 AssertRCReturn(rc, rc);
2116
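    /* Only the ring-3 ballooning fallback may feed us chunks; when the kernel API manages the balloon this ioctl is refused. */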
2117 if (!pDevExt->MemBalloon.fUseKernelAPI)
2118 {
2119 /*
2120 * The first user trying to query/change the balloon becomes the
2121 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2122 */
2123 if ( pDevExt->MemBalloon.pOwner != pSession
2124 && pDevExt->MemBalloon.pOwner == NULL)
2125 pDevExt->MemBalloon.pOwner = pSession;
2126
2127 if (pDevExt->MemBalloon.pOwner == pSession)
2128 {
2129 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2130 if (pcbDataReturned)
2131 *pcbDataReturned = 0;
2132 }
2133 else
2134 rc = VERR_PERMISSION_DENIED;
2135 }
2136 else
2137 rc = VERR_PERMISSION_DENIED;
2138
2139 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2140 return rc;
2141}
2142
2143
2144/**
2145 * Handle a request for writing a core dump of the guest on the host.
2146 *
2147 * @returns VBox status code.
2148 *
2149 * @param pDevExt The device extension.
2150 * @param pInfo The output buffer.
2151 */
2152static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2153{
2154 VMMDevReqWriteCoreDump *pReq = NULL;
2155 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2156 if (RT_FAILURE(rc))
2157 {
2158 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2159 sizeof(*pReq), sizeof(*pReq), rc));
2160 return rc;
2161 }
2162
2163 pReq->fFlags = pInfo->fFlags;
2164 rc = VbglGRPerform(&pReq->header);
2165 if (RT_FAILURE(rc))
2166 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2167
2168 VbglGRFree(&pReq->header);
2169 return rc;
2170}
2171
2172
2173#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2174/**
2175 * Enables the VRDP session and saves its session ID.
2176 *
2177 * @returns VBox status code.
2178 *
2179 * @param pDevExt The device extension.
2180 * @param pSession The session.
2181 */
2182 static int VBoxGuestCommonIOCtl_EnableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2183 {
2184 /* Nothing to do here right now, since this is only supported on Windows at the moment. */
2185 return VERR_NOT_IMPLEMENTED;
2186}
2187
2188
2189/**
2190 * Disables the VRDP session.
2191 *
2192 * @returns VBox status code.
2193 *
2194 * @param pDevExt The device extension.
2195 * @param pSession The session.
2196 */
2197 static int VBoxGuestCommonIOCtl_DisableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2198 {
2199 /* Nothing to do here right now, since this is only supported on Windows at the moment. */
2200 return VERR_NOT_IMPLEMENTED;
2201}
2202#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2203
2204#ifdef DEBUG
2205/** Unit test SetMouseStatus instead of really executing the request. */
2206static bool g_test_fSetMouseStatus = false;
2207/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
2208static int g_test_SetMouseStatusGRRC;
2209/** When unit testing SetMouseStatus this will be set to the status passed to
2210 * the GR. */
2211static uint32_t g_test_statusSetMouseStatus;
2212#endif
2213
2214static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
2215{
2216 VMMDevReqMouseStatus *pReq;
2217 int rc;
2218
2219 LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
2220 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
2221 if (RT_SUCCESS(rc))
2222 {
2223 pReq->mouseFeatures = fFeatures;
2224 pReq->pointerXPos = 0;
2225 pReq->pointerYPos = 0;
2226#ifdef DEBUG
2227 if (g_test_fSetMouseStatus)
2228 {
2229 g_test_statusSetMouseStatus = pReq->mouseFeatures;
2230 rc = g_test_SetMouseStatusGRRC;
2231 }
2232 else
2233#endif
2234 rc = VbglGRPerform(&pReq->header);
2235 VbglGRFree(&pReq->header);
2236 }
2237 LogRelFlowFunc(("rc=%Rrc\n", rc));
2238 return rc;
2239}
2240
2241
2242/**
2243 * Sets the mouse status features for this session and updates them
2244 * globally. We aim to ensure that if several threads call this in
2245 * parallel the most recent status will always end up being set.
2246 *
2247 * @returns VBox status code.
2248 *
2249 * @param pDevExt The device extension.
2250 * @param pSession The session.
2251 * @param fFeatures New bitmap of enabled features.
2252 */
2253static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2254{
2255 uint32_t fNewDevExtStatus = 0;
2256 unsigned i;
2257 int rc;
2258 /* Exit early if nothing has changed - hack to work around the
2259 * Windows Additions not using the common code. */
2260 bool fNoAction;
2261
2262 RTSpinlockAcquire(pDevExt->SessionSpinlock);
2263
2264 for (i = 0; i < sizeof(fFeatures) * 8; i++)
2265 {
2266 if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
2267 {
2268 if ( (RT_BIT_32(i) & fFeatures)
2269 && !(RT_BIT_32(i) & pSession->fMouseStatus))
2270 pDevExt->acMouseFeatureUsage[i]++;
2271 else if ( !(RT_BIT_32(i) & fFeatures)
2272 && (RT_BIT_32(i) & pSession->fMouseStatus))
2273 pDevExt->acMouseFeatureUsage[i]--;
2274 }
2275 if (pDevExt->acMouseFeatureUsage[i] > 0)
2276 fNewDevExtStatus |= RT_BIT_32(i);
2277 }
2278
2279 pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
2280 fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
2281 pDevExt->fMouseStatus = fNewDevExtStatus;
2282
2283 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
2284 if (fNoAction)
2285 return VINF_SUCCESS;
2286
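    /* Push the new aggregate status to the host, retrying if another thread changed it in the meantime so that the most recent value always wins. */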
2287 do
2288 {
2289 fNewDevExtStatus = pDevExt->fMouseStatus;
2290 rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
2291 } while ( RT_SUCCESS(rc)
2292 && fNewDevExtStatus != pDevExt->fMouseStatus);
2293
2294 return rc;
2295}
2296
2297
2298#ifdef DEBUG
2299/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2300 * the code in question it probably makes most sense to keep it next to the
2301 * code. */
2302static void testSetMouseStatus(void)
2303{
2304 uint32_t u32Data;
2305 int rc;
2306 RTSPINLOCK Spinlock;
2307
2308 g_test_fSetMouseStatus = true;
2309 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2310 AssertRCReturnVoid(rc);
2311 {
2312 VBOXGUESTDEVEXT DevExt = { 0 };
2313 VBOXGUESTSESSION Session = { 0 };
2314
2315 g_test_statusSetMouseStatus = ~0;
2316 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2317 DevExt.SessionSpinlock = Spinlock;
2318 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2319 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2320 &Session, &u32Data, sizeof(u32Data), NULL);
2321 AssertRCSuccess(rc);
2322 AssertMsg( g_test_statusSetMouseStatus
2323 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2324 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2325 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2326 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2327 &Session, &u32Data, sizeof(u32Data), NULL);
2328 AssertRCSuccess(rc);
2329 AssertMsg( g_test_statusSetMouseStatus
2330 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2331 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2332 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2333 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2334 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2335 &Session, &u32Data, sizeof(u32Data), NULL);
2336 AssertRCSuccess(rc);
2337 AssertMsg( g_test_statusSetMouseStatus
2338 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2339 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2340 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2341 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2342 &Session, &u32Data, sizeof(u32Data), NULL);
2343 AssertRCSuccess(rc);
2344 AssertMsg( g_test_statusSetMouseStatus
2345 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2346 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2347 u32Data = 0;
2348 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2349 &Session, &u32Data, sizeof(u32Data), NULL);
2350 AssertRCSuccess(rc);
2351 AssertMsg( g_test_statusSetMouseStatus
2352 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2353 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2354 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2355 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2356 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2357 /* This should succeed as the host request should not be made
2358 * since nothing has changed. */
2359 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2360 &Session, &u32Data, sizeof(u32Data), NULL);
2361 AssertRCSuccess(rc);
2362 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2363 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2364 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2365 &Session, &u32Data, sizeof(u32Data), NULL);
2366 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2367 /* Untested paths: out of memory; race setting status to host */
2368 }
2369 RTSpinlockDestroy(Spinlock);
2370 g_test_fSetMouseStatus = false;
2371}
2372#endif
2373
2374
2375/**
2376 * Guest backdoor logging.
2377 *
2378 * @returns VBox status code.
2379 *
2380 * @param pDevExt The device extension.
2381 * @param pch The log message (need not be NULL terminated).
2382 * @param cbData Size of the buffer.
2383 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2384 */
2385static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2386{
2387 NOREF(pch);
2388 NOREF(cbData);
2389 if (pDevExt->fLoggingEnabled)
2390 RTLogBackdoorPrintf("%.*s", cbData, pch);
2391 else
2392 Log(("%.*s", cbData, pch));
2393 if (pcbDataReturned)
2394 *pcbDataReturned = 0;
2395 return VINF_SUCCESS;
2396}
2397
2398
2399/**
2400 * Common IOCtl for user to kernel and kernel to kernel communication.
2401 *
2402 * This function only does the basic validation and then invokes
2403 * worker functions that takes care of each specific function.
2404 *
2405 * @returns VBox status code.
2406 *
2407 * @param iFunction The requested function.
2408 * @param pDevExt The device extension.
2409 * @param pSession The client session.
2410 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2411 * @param cbData The max size of the data buffer.
2412 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2413 */
2414int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2415 void *pvData, size_t cbData, size_t *pcbDataReturned)
2416{
2417 int rc;
2418 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2419 iFunction, pDevExt, pSession, pvData, cbData));
2420
2421 /*
2422 * Make sure the returned data size is set to zero.
2423 */
2424 if (pcbDataReturned)
2425 *pcbDataReturned = 0;
2426
2427 /*
2428 * Define some helper macros to simplify validation.
2429 */
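/** Fail with VERR_PERMISSION_DENIED unless the caller is a ring-0 (kernel) session. */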
2430#define CHECKRET_RING0(mnemonic) \
2431 do { \
2432 if (pSession->R0Process != NIL_RTR0PROCESS) \
2433 { \
2434 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2435 pSession->Process, (uintptr_t)pSession->R0Process)); \
2436 return VERR_PERMISSION_DENIED; \
2437 } \
2438 } while (0)
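/** Fail unless the buffer holds at least cbMin bytes and, when cbMin is non-zero, pvData is a valid pointer. */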
2439#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2440 do { \
2441 if (cbData < (cbMin)) \
2442 { \
2443 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2444 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2445 return VERR_BUFFER_OVERFLOW; \
2446 } \
2447 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2448 { \
2449 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2450 return VERR_INVALID_POINTER; \
2451 } \
2452 } while (0)
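/** Fail unless the buffer is exactly cb bytes and, when cb is non-zero, pvData is a valid pointer. */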
2453#define CHECKRET_SIZE(mnemonic, cb) \
2454 do { \
2455 if (cbData != (cb)) \
2456 { \
2457 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2458 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2459 return VERR_BUFFER_OVERFLOW; \
2460 } \
2461 if ((cb) != 0 && !VALID_PTR(pvData)) \
2462 { \
2463 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2464 return VERR_INVALID_POINTER; \
2465 } \
2466 } while (0)
2467
2468
2469 /*
2470 * Deal with variably sized requests first.
2471 */
2472 rc = VINF_SUCCESS;
2473 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2474 {
2475 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2476 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2477 }
2478#ifdef VBOX_WITH_HGCM
2479 /*
2480 * These ones are a bit tricky.
2481 */
2482 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2483 {
2484 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2485 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2486 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2487 fInterruptible, false /*f32bit*/, false /* fUserData */,
2488 0, cbData, pcbDataReturned);
2489 }
2490 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2491 {
2492 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2493 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2494 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2495 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2496 false /*f32bit*/, false /* fUserData */,
2497 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2498 }
2499 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2500 {
2501 bool fInterruptible = true;
2502 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2503 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2504 fInterruptible, false /*f32bit*/, true /* fUserData */,
2505 0, cbData, pcbDataReturned);
2506 }
2507# ifdef RT_ARCH_AMD64
2508 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2509 {
2510 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2511 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2512 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2513 fInterruptible, true /*f32bit*/, false /* fUserData */,
2514 0, cbData, pcbDataReturned);
2515 }
2516 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2517 {
2518 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2519 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2520 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2521 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2522 true /*f32bit*/, false /* fUserData */,
2523 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2524 }
2525# endif
2526#endif /* VBOX_WITH_HGCM */
2527 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2528 {
2529 CHECKRET_MIN_SIZE("LOG", 1);
2530 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2531 }
2532 else
2533 {
2534 switch (iFunction)
2535 {
2536 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2537 CHECKRET_RING0("GETVMMDEVPORT");
2538 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2539 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2540 break;
2541
2542#ifndef RT_OS_WINDOWS /* Windows has its own implementation of this. */
2543 case VBOXGUEST_IOCTL_SET_MOUSE_NOTIFY_CALLBACK:
2544 CHECKRET_RING0("SET_MOUSE_NOTIFY_CALLBACK");
2545 CHECKRET_SIZE("SET_MOUSE_NOTIFY_CALLBACK", sizeof(VBoxGuestMouseSetNotifyCallback));
2546 rc = VBoxGuestCommonIOCtl_SetMouseNotifyCallback(pDevExt, (VBoxGuestMouseSetNotifyCallback *)pvData);
2547 break;
2548#endif
2549
2550 case VBOXGUEST_IOCTL_WAITEVENT:
2551 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2552 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2553 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2554 break;
2555
2556 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2557 if (cbData != 0)
2558 rc = VERR_INVALID_PARAMETER;
2559 else rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2560 break;
2561
2562 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2563 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2564 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2565 break;
2566
2567#ifdef VBOX_WITH_HGCM
2568 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2569# ifdef RT_ARCH_AMD64
2570 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2571# endif
2572 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2573 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2574 break;
2575
2576 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2577# ifdef RT_ARCH_AMD64
2578 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2579# endif
2580 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2581 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2582 break;
2583#endif /* VBOX_WITH_HGCM */
2584
2585 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2586 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2587 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2588 break;
2589
2590 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2591 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2592 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2593 break;
2594
2595 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2596 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2597 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2598 break;
2599
2600#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2601 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2602 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2603 break;
2604
2605 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2606 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2607 break;
2608#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2609 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2610 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2611 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2612 *(uint32_t *)pvData);
2613 break;
2614
2615 default:
2616 {
2617 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2618 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2619 rc = VERR_NOT_SUPPORTED;
2620 break;
2621 }
2622 }
2623 }
2624
2625 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2626 return rc;
2627}
2628
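/*
 * Usage sketch (hypothetical caller, not taken from this file): an OS specific
 * part of the driver that already owns a device extension and a session could
 * forward a backdoor log string through the common dispatcher roughly like this:
 *
 *     char szMsg[] = "hello from the guest";
 *     int rc2 = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_LOG(sizeof(szMsg)), pDevExt, pSession,
 *                                    szMsg, sizeof(szMsg), NULL);
 */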
2629
2630
2631/**
2632 * Common interrupt service routine.
2633 *
2634 * This deals with events and with waking up thread waiting for those events.
2635 *
2636 * @returns true if it was our interrupt, false if it wasn't.
2637 * @param pDevExt The VBoxGuest device extension.
2638 */
2639bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2640{
2641#ifndef RT_OS_WINDOWS
2642 VBoxGuestMouseSetNotifyCallback MouseNotifyCallback = { NULL, NULL };
2643#endif
2644 bool fMousePositionChanged = false;
2645 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2646 int rc = 0;
2647 bool fOurIrq;
2648
2649 /*
2650 * Make sure we've initialized the device extension.
2651 */
2652 if (RT_UNLIKELY(!pReq))
2653 return false;
2654
2655 /*
2656 * Enter the spinlock, increase the ISR count and check if it's our IRQ or
2657 * not.
2658 */
2659 RTSpinlockAcquire(pDevExt->EventSpinlock);
2660 ASMAtomicIncU32(&pDevExt->cISR);
2661 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2662 if (fOurIrq)
2663 {
2664 /*
2665 * Acknowledge events.
2666 * We don't use VbglGRPerform here as it may take other spinlocks.
2667 */
2668 pReq->header.rc = VERR_INTERNAL_ERROR;
2669 pReq->events = 0;
2670 ASMCompilerBarrier();
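        /* Hand the physical address of the pre-allocated acknowledge request to the VMMDev request port; the host completes it synchronously and fills in pReq->events. */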
2671 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2672 ASMCompilerBarrier(); /* paranoia */
2673 if (RT_SUCCESS(pReq->header.rc))
2674 {
2675 uint32_t fEvents = pReq->events;
2676 PVBOXGUESTWAIT pWait;
2677 PVBOXGUESTWAIT pSafe;
2678
2679 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2680
2681 /*
2682 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2683 */
2684 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2685 {
2686#ifndef RT_OS_WINDOWS
2687 MouseNotifyCallback = pDevExt->MouseNotifyCallback;
2688#endif
2689 fMousePositionChanged = true;
2690 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2691 }
2692
2693#ifdef VBOX_WITH_HGCM
2694 /*
2695 * The HGCM event/list is kind of different in that we evaluate all entries.
2696 */
2697 if (fEvents & VMMDEV_EVENT_HGCM)
2698 {
2699 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2700 {
2701 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2702 {
2703 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2704 RTListNodeRemove(&pWait->ListNode);
2705# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2706 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2707# else
2708 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2709 rc |= RTSemEventMultiSignal(pWait->Event);
2710# endif
2711 }
2712 }
2713 fEvents &= ~VMMDEV_EVENT_HGCM;
2714 }
2715#endif
2716
2717 /*
2718 * Normal FIFO waiter evaluation.
2719 */
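            /* Fold in events nobody has consumed yet; whatever remains unclaimed after the loop below is written back to f32PendingEvents. */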
2720 fEvents |= pDevExt->f32PendingEvents;
2721 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2722 {
2723 if ( (pWait->fReqEvents & fEvents)
2724 && !pWait->fResEvents)
2725 {
2726 pWait->fResEvents = pWait->fReqEvents & fEvents;
2727 fEvents &= ~pWait->fResEvents;
2728 RTListNodeRemove(&pWait->ListNode);
2729#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2730 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2731#else
2732 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2733 rc |= RTSemEventMultiSignal(pWait->Event);
2734#endif
2735 if (!fEvents)
2736 break;
2737 }
2738 }
2739 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2740 }
2741 else /* something is seriously wrong... */
2742 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2743 pReq->header.rc, pReq->events));
2744 }
2745 else
2746 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2747
2748 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
2749
2750#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2751 /*
2752 * Do wake-ups.
2753 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2754 * care of it.
2755 */
2756 VBoxGuestWaitDoWakeUps(pDevExt);
2757#endif
2758
2759 /*
2760 * Work the poll and async notification queues on OSes that implement them.
2761 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2762 */
2763 if (fMousePositionChanged)
2764 {
2765 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2766 VBoxGuestNativeISRMousePollEvent(pDevExt);
2767#ifndef RT_OS_WINDOWS
2768 if (MouseNotifyCallback.pfnNotify)
2769 MouseNotifyCallback.pfnNotify(MouseNotifyCallback.pvUser);
2770#endif
2771 }
2772
2773 ASMAtomicDecU32(&pDevExt->cISR);
2774 Assert(rc == 0);
2775 return fOurIrq;
2776}
2777