VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 41639

Last change on this file since 41639 was 41639, checked in by vboxsync, 13 years ago

Additions/common/VBoxGuest: C fix.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 96.2 KB
Line 
1/* $Id: VBoxGuest.cpp 41639 2012-06-11 09:10:17Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43#ifdef RT_OS_WINDOWS
44# ifndef CTL_CODE
45# include <Windows.h>
46# endif
47#endif
48#if defined(RT_OS_SOLARIS)
49# include <iprt/rand.h>
50#endif
51
52
53/*******************************************************************************
54* Internal Functions *
55*******************************************************************************/
56#ifdef VBOX_WITH_HGCM
57static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
58#endif
59#ifdef DEBUG
60static void testSetMouseStatus(void);
61#endif
62static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
63
64
65/*******************************************************************************
66* Global Variables *
67*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request up to and including the array of
 *  physical page addresses for one whole balloon chunk. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);

#if defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    NULL
};
#endif  /* RT_OS_SOLARIS */
82
83
84/**
85 * Reserves memory in which the VMM can relocate any guest mappings
86 * that are floating around.
87 *
88 * This operation is a little bit tricky since the VMM might not accept
89 * just any address because of address clashes between the three contexts
90 * it operates in, so use a small stack to perform this operation.
91 *
92 * @returns VBox status code (ignored).
93 * @param pDevExt The device extension.
94 */
95static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
96{
97 /*
98 * Query the required space.
99 */
100 VMMDevReqHypervisorInfo *pReq;
101 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
102 if (RT_FAILURE(rc))
103 return rc;
104 pReq->hypervisorStart = 0;
105 pReq->hypervisorSize = 0;
106 rc = VbglGRPerform(&pReq->header);
107 if (RT_FAILURE(rc)) /* this shouldn't happen! */
108 {
109 VbglGRFree(&pReq->header);
110 return rc;
111 }
112
113 /*
114 * The VMM will report back if there is nothing it wants to map, like for
115 * instance in VT-x and AMD-V mode.
116 */
117 if (pReq->hypervisorSize == 0)
118 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
119 else
120 {
121 /*
122 * We have to try several times since the host can be picky
123 * about certain addresses.
124 */
125 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
126 uint32_t cbHypervisor = pReq->hypervisorSize;
127 RTR0MEMOBJ ahTries[5];
128 uint32_t iTry;
129 bool fBitched = false;
130 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
131 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
132 {
133 /*
134 * Reserve space, or if that isn't supported, create a object for
135 * some fictive physical memory and map that in to kernel space.
136 *
137 * To make the code a bit uglier, most systems cannot help with
138 * 4MB alignment, so we have to deal with that in addition to
139 * having two ways of getting the memory.
140 */
141 uint32_t uAlignment = _4M;
142 RTR0MEMOBJ hObj;
143 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
144 if (rc == VERR_NOT_SUPPORTED)
145 {
146 uAlignment = PAGE_SIZE;
147 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
148 }
149 /*
150 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
151 * not implemented at all at the current platform, try to map the memory object into the
152 * virtual kernel space.
153 */
154 if (rc == VERR_NOT_SUPPORTED)
155 {
156 if (hFictive == NIL_RTR0MEMOBJ)
157 {
158 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
159 if (RT_FAILURE(rc))
160 break;
161 hFictive = hObj;
162 }
163 uAlignment = _4M;
164 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
165 if (rc == VERR_NOT_SUPPORTED)
166 {
167 uAlignment = PAGE_SIZE;
168 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
169 }
170 }
171 if (RT_FAILURE(rc))
172 {
173 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
174 rc, cbHypervisor, uAlignment, iTry));
175 fBitched = true;
176 break;
177 }
178
179 /*
180 * Try set it.
181 */
182 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
183 pReq->header.rc = VERR_INTERNAL_ERROR;
184 pReq->hypervisorSize = cbHypervisor;
185 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
186 if ( uAlignment == PAGE_SIZE
187 && pReq->hypervisorStart & (_4M - 1))
188 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
189 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
190
191 rc = VbglGRPerform(&pReq->header);
192 if (RT_SUCCESS(rc))
193 {
194 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
195 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
196 RTR0MemObjAddress(pDevExt->hGuestMappings),
197 RTR0MemObjSize(pDevExt->hGuestMappings),
198 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
199 break;
200 }
201 ahTries[iTry] = hObj;
202 }
203
204 /*
205 * Cleanup failed attempts.
206 */
207 while (iTry-- > 0)
208 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
209 if ( RT_FAILURE(rc)
210 && hFictive != NIL_RTR0PTR)
211 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
212 if (RT_FAILURE(rc) && !fBitched)
213 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
214 }
215 VbglGRFree(&pReq->header);
216
217 /*
218 * We ignore failed attempts for now.
219 */
220 return VINF_SUCCESS;
221}
222
223
/**
 * Undo what vboxGuestInitFixateGuestMappings did.
 *
 * Tells the host the reserved hypervisor area is going away and then frees
 * the backing ring-0 memory object.
 *
 * @param   pDevExt     The device extension.
 */
static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
{
    if (pDevExt->hGuestMappings != NIL_RTR0PTR)
    {
        /*
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
        VMMDevReqHypervisorInfo *pReq;
        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
        if (RT_SUCCESS(rc))
        {
            /* Zero start+size tells the host to drop the hypervisor area. */
            pReq->hypervisorStart = 0;
            pReq->hypervisorSize  = 0;
            rc = VbglGRPerform(&pReq->header);
            VbglGRFree(&pReq->header);
        }
        if (RT_SUCCESS(rc))
        {
            /* Only free the memory after the host acknowledged letting go of it. */
            rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
            AssertRC(rc);
        }
        else
            LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));

        pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    }
}
257
258
259/**
260 * Sets the interrupt filter mask during initialization and termination.
261 *
262 * This will ASSUME that we're the ones in carge over the mask, so
263 * we'll simply clear all bits we don't set.
264 *
265 * @returns VBox status code (ignored).
266 * @param pDevExt The device extension.
267 * @param fMask The new mask.
268 */
269static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
270{
271 VMMDevCtlGuestFilterMask *pReq;
272 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
273 if (RT_SUCCESS(rc))
274 {
275 pReq->u32OrMask = fMask;
276 pReq->u32NotMask = ~fMask;
277 rc = VbglGRPerform(&pReq->header);
278 if (RT_FAILURE(rc))
279 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
280 VbglGRFree(&pReq->header);
281 }
282 return rc;
283}
284
285
286/**
287 * Inflate the balloon by one chunk represented by an R0 memory object.
288 *
289 * The caller owns the balloon mutex.
290 *
291 * @returns IPRT status code.
292 * @param pMemObj Pointer to the R0 memory object.
293 * @param pReq The pre-allocated request for performing the VMMDev call.
294 */
295static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
296{
297 uint32_t iPage;
298 int rc;
299
300 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
301 {
302 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
303 pReq->aPhysPage[iPage] = phys;
304 }
305
306 pReq->fInflate = true;
307 pReq->header.size = cbChangeMemBalloonReq;
308 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
309
310 rc = VbglGRPerform(&pReq->header);
311 if (RT_FAILURE(rc))
312 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
313 return rc;
314}
315
316
317/**
318 * Deflate the balloon by one chunk - info the host and free the memory object.
319 *
320 * The caller owns the balloon mutex.
321 *
322 * @returns IPRT status code.
323 * @param pMemObj Pointer to the R0 memory object.
324 * The memory object will be freed afterwards.
325 * @param pReq The pre-allocated request for performing the VMMDev call.
326 */
327static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
328{
329 uint32_t iPage;
330 int rc;
331
332 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
333 {
334 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
335 pReq->aPhysPage[iPage] = phys;
336 }
337
338 pReq->fInflate = false;
339 pReq->header.size = cbChangeMemBalloonReq;
340 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
341
342 rc = VbglGRPerform(&pReq->header);
343 if (RT_FAILURE(rc))
344 {
345 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
346 return rc;
347 }
348
349 rc = RTR0MemObjFree(*pMemObj, true);
350 if (RT_FAILURE(rc))
351 {
352 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
353 return rc;
354 }
355
356 *pMemObj = NIL_RTR0MEMOBJ;
357 return VINF_SUCCESS;
358}
359
360
361/**
362 * Inflate/deflate the memory balloon and notify the host.
363 *
364 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
365 * the mutex.
366 *
367 * @returns VBox status code.
368 * @param pDevExt The device extension.
369 * @param pSession The session.
370 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
371 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
372 * (VINF_SUCCESS if set).
373 */
374static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
375{
376 int rc = VINF_SUCCESS;
377
378 if (pDevExt->MemBalloon.fUseKernelAPI)
379 {
380 VMMDevChangeMemBalloon *pReq;
381 uint32_t i;
382
383 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
384 {
385 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
386 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
387 return VERR_INVALID_PARAMETER;
388 }
389
390 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
391 return VINF_SUCCESS; /* nothing to do */
392
393 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
394 && !pDevExt->MemBalloon.paMemObj)
395 {
396 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
397 if (!pDevExt->MemBalloon.paMemObj)
398 {
399 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
400 return VERR_NO_MEMORY;
401 }
402 }
403
404 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
405 if (RT_FAILURE(rc))
406 return rc;
407
408 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
409 {
410 /* inflate */
411 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
412 {
413 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
414 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
415 if (RT_FAILURE(rc))
416 {
417 if (rc == VERR_NOT_SUPPORTED)
418 {
419 /* not supported -- fall back to the R3-allocated memory. */
420 rc = VINF_SUCCESS;
421 pDevExt->MemBalloon.fUseKernelAPI = false;
422 Assert(pDevExt->MemBalloon.cChunks == 0);
423 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
424 }
425 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
426 * cannot allocate more memory => don't try further, just stop here */
427 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
428 break;
429 }
430
431 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
432 if (RT_FAILURE(rc))
433 {
434 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
435 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
436 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
437 break;
438 }
439 pDevExt->MemBalloon.cChunks++;
440 }
441 }
442 else
443 {
444 /* deflate */
445 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
446 {
447 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
448 if (RT_FAILURE(rc))
449 {
450 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
451 break;
452 }
453 pDevExt->MemBalloon.cChunks--;
454 }
455 }
456
457 VbglGRFree(&pReq->header);
458 }
459
460 /*
461 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
462 * the balloon changes via the other API.
463 */
464 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
465
466 return rc;
467}
468
469
470/**
471 * Helper to reinit the VBoxVMM communication after hibernation.
472 *
473 * @returns VBox status code.
474 * @param pDevExt The device extension.
475 * @param enmOSType The OS type.
476 */
477int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
478{
479 int rc = VBoxGuestReportGuestInfo(enmOSType);
480 if (RT_SUCCESS(rc))
481 {
482 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
483 if (RT_FAILURE(rc))
484 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
485 }
486 else
487 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
488 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
489 return rc;
490}
491
492
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject when the balloon is already full or the size was never queried. */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        if (!pDevExt->MemBalloon.paMemObj)
        {
            /* Lazily allocate the chunk tracking array; mark all slots free. */
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflate this also picks the first free slot; for deflate it locates
     * the object backing u64ChunkAddr.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the ring-3 chunk before handing its pages to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
609
610
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session. Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only tear down if this session owns the balloon (or at driver unload). */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate from the end so cChunks stays consistent with the array. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
658
659
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;
    unsigned i;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    /* HGCM notifications must always reach us when HGCM is compiled in. */
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
    pDevExt->fVRDPEnabled = false;
#endif
    pDevExt->fLoggingEnabled = false;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;
    for (i = 0; i < RT_ELEMENTS(pDevExt->acMouseFeatureUsage); ++i)
        pDevExt->acMouseFeatureUsage[i] = 0;
    pDevExt->fMouseStatus = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        /* NOTE(review): 32 presumably is the minimum size of a valid
           VMMDevMemory header -- confirm against VMMDev.h. */
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestEvent");
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestSession");
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* EventSpinlock may or may not have been created; check before destroying. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledgement request; the ISR cannot allocate. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

#ifdef DEBUG
                        testSetMouseStatus();  /* Other tests? */
#endif

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: undo the lock creation done above (reverse order). */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
836
837
838/**
839 * Deletes all the items in a wait chain.
840 * @param pList The head of the chain.
841 */
842static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
843{
844 while (!RTListIsEmpty(pList))
845 {
846 int rc2;
847 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
848 RTListNodeRemove(&pWait->ListNode);
849
850 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
851 pWait->Event = NIL_RTSEMEVENTMULTI;
852 pWait->pSession = NULL;
853 RTMemFree(pWait);
854 }
855}
856
857
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    /* Destroy every wait list, including cached free entries. */
    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
904
905
906/**
907 * Creates a VBoxGuest user session.
908 *
909 * The native code calls this when a ring-3 client opens the device.
910 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
911 *
912 * @returns VBox status code.
913 * @param pDevExt The device extension.
914 * @param ppSession Where to store the session on success.
915 */
916int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
917{
918 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
919 if (RT_UNLIKELY(!pSession))
920 {
921 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
922 return VERR_NO_MEMORY;
923 }
924
925 pSession->Process = RTProcSelf();
926 pSession->R0Process = RTR0ProcHandleSelf();
927 pSession->pDevExt = pDevExt;
928
929 *ppSession = pSession;
930 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
931 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
932 return VINF_SUCCESS;
933}
934
935
936/**
937 * Creates a VBoxGuest kernel session.
938 *
939 * The native code calls this when a ring-0 client connects to the device.
940 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
941 *
942 * @returns VBox status code.
943 * @param pDevExt The device extension.
944 * @param ppSession Where to store the session on success.
945 */
946int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
947{
948 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
949 if (RT_UNLIKELY(!pSession))
950 {
951 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
952 return VERR_NO_MEMORY;
953 }
954
955 pSession->Process = NIL_RTPROCESS;
956 pSession->R0Process = NIL_RTR0PROCESS;
957 pSession->pDevExt = pDevExt;
958
959 *ppSession = pSession;
960 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
961 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
962 return VINF_SUCCESS;
963}
964
965
966
/**
 * Closes a VBoxGuest session.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i); /* only used by the HGCM loop below */
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect any HGCM clients the session still has, so the host side
       doesn't keep stale client ids around. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    /* Release the balloon if this session owned it. */
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    /* Reset any mouse status flags which the session may have set. */
    VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
    RTMemFree(pSession);
}
1000
1001
1002/**
1003 * Allocates a wait-for-event entry.
1004 *
1005 * @returns The wait-for-event entry.
1006 * @param pDevExt The device extension.
1007 * @param pSession The session that's allocating this. Can be NULL.
1008 */
1009static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1010{
1011 /*
1012 * Allocate it one way or the other.
1013 */
1014 PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1015 if (pWait)
1016 {
1017 RTSpinlockAcquire(pDevExt->EventSpinlock);
1018
1019 pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
1020 if (pWait)
1021 RTListNodeRemove(&pWait->ListNode);
1022
1023 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1024 }
1025 if (!pWait)
1026 {
1027 static unsigned s_cErrors = 0;
1028 int rc;
1029
1030 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1031 if (!pWait)
1032 {
1033 if (s_cErrors++ < 32)
1034 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1035 return NULL;
1036 }
1037
1038 rc = RTSemEventMultiCreate(&pWait->Event);
1039 if (RT_FAILURE(rc))
1040 {
1041 if (s_cErrors++ < 32)
1042 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1043 RTMemFree(pWait);
1044 return NULL;
1045 }
1046
1047 pWait->ListNode.pNext = NULL;
1048 pWait->ListNode.pPrev = NULL;
1049 }
1050
1051 /*
1052 * Zero members just as an precaution.
1053 */
1054 pWait->fReqEvents = 0;
1055 pWait->fResEvents = 0;
1056#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1057 pWait->fPendingWakeUp = false;
1058 pWait->fFreeMe = false;
1059#endif
1060 pWait->pSession = pSession;
1061#ifdef VBOX_WITH_HGCM
1062 pWait->pHGCMReq = NULL;
1063#endif
1064 RTSemEventMultiReset(pWait->Event);
1065 return pWait;
1066}
1067
1068
1069/**
1070 * Frees the wait-for-event entry.
1071 *
1072 * The caller must own the wait spinlock !
1073 * The entry must be in a list!
1074 *
1075 * @param pDevExt The device extension.
1076 * @param pWait The wait-for-event entry to free.
1077 */
1078static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1079{
1080 pWait->fReqEvents = 0;
1081 pWait->fResEvents = 0;
1082#ifdef VBOX_WITH_HGCM
1083 pWait->pHGCMReq = NULL;
1084#endif
1085#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
1086 Assert(!pWait->fFreeMe);
1087 if (pWait->fPendingWakeUp)
1088 pWait->fFreeMe = true;
1089 else
1090#endif
1091 {
1092 RTListNodeRemove(&pWait->ListNode);
1093 RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
1094 }
1095}
1096
1097
1098/**
1099 * Frees the wait-for-event entry.
1100 *
1101 * @param pDevExt The device extension.
1102 * @param pWait The wait-for-event entry to free.
1103 */
1104static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1105{
1106 RTSpinlockAcquire(pDevExt->EventSpinlock);
1107 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1108 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1109}
1110
1111
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 *
 * Must be called without owning the event spinlock; the lock is dropped
 * around each RTSemEventMultiSignal call since signalling may not be safe
 * with the spinlock held on all platforms.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Mark the entry busy so VBoxGuestWaitFreeLocked defers freeing
               it while we signal the semaphore with the lock dropped. */
            pWait->fPendingWakeUp = true;
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquire(pDevExt->EventSpinlock);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                /* Normal case: move the entry to the woken-up list. */
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* Someone tried to free the entry while we were signalling
                   it; complete the deferred free now. */
                pWait->fFreeMe = false;
                VBoxGuestWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    }
}
#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1155
1156
1157/**
1158 * Modifies the guest capabilities.
1159 *
1160 * Should be called during driver init and termination.
1161 *
1162 * @returns VBox status code.
1163 * @param fOr The Or mask (what to enable).
1164 * @param fNot The Not mask (what to disable).
1165 */
1166int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1167{
1168 VMMDevReqGuestCapabilities2 *pReq;
1169 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1170 if (RT_FAILURE(rc))
1171 {
1172 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1173 sizeof(*pReq), sizeof(*pReq), rc));
1174 return rc;
1175 }
1176
1177 pReq->u32OrMask = fOr;
1178 pReq->u32NotMask = fNot;
1179
1180 rc = VbglGRPerform(&pReq->header);
1181 if (RT_FAILURE(rc))
1182 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1183
1184 VbglGRFree(&pReq->header);
1185 return rc;
1186}
1187
1188
1189/**
1190 * Implements the fast (no input or output) type of IOCtls.
1191 *
1192 * This is currently just a placeholder stub inherited from the support driver code.
1193 *
1194 * @returns VBox status code.
1195 * @param iFunction The IOCtl function number.
1196 * @param pDevExt The device extension.
1197 * @param pSession The session.
1198 */
1199int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1200{
1201 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1202
1203 NOREF(iFunction);
1204 NOREF(pDevExt);
1205 NOREF(pSession);
1206 return VERR_NOT_SUPPORTED;
1207}
1208
1209
1210/**
1211 * Return the VMM device port.
1212 *
1213 * returns IPRT status code.
1214 * @param pDevExt The device extension.
1215 * @param pInfo The request info.
1216 * @param pcbDataReturned (out) contains the number of bytes to return.
1217 */
1218static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1219{
1220 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1221 pInfo->portAddress = pDevExt->IOPortBase;
1222 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1223 if (pcbDataReturned)
1224 *pcbDataReturned = sizeof(*pInfo);
1225 return VINF_SUCCESS;
1226}
1227
1228
1229/**
1230 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1231 *
1232 * The caller enters the spinlock, we leave it.
1233 *
1234 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1235 */
1236DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1237 int iEvent, const uint32_t fReqEvents)
1238{
1239 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1240 if (fMatches)
1241 {
1242 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1243 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1244
1245 pInfo->u32EventFlagsOut = fMatches;
1246 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1247 if (fReqEvents & ~((uint32_t)1 << iEvent))
1248 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1249 else
1250 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1251 return VINF_SUCCESS;
1252 }
1253 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
1254 return VERR_TIMEOUT;
1255}
1256
1257
/**
 * Handles VBOXGUEST_IOCTL_WAITEVENT.
 *
 * Waits for any of the events in pInfo->u32EventMaskIn to become pending,
 * honouring pInfo->u32TimeoutIn (0 = poll only, UINT32_MAX = wait forever).
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The wait request (input and output).
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Can be NULL.
 * @param   fInterruptible      Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents); /* (leaves the spinlock) */
    if (rc == VINF_SUCCESS)
        return rc;

    if (!pInfo->u32TimeoutIn)
    {
        /* Zero timeout means poll only: report timeout without sleeping. */
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents); /* (leaves the spinlock) */
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    /*
     * Now deal with the return code.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        /* fResEvents == UINT32_MAX is the cancel marker set by
           VBoxGuestCommonIOCtl_CancelAllWaitEvents. */
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Waking up successfully without any events is unexpected; log
               (rate limited) and convert to an internal error. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1380
1381
/**
 * Handles VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS.
 *
 * Wakes up every wait-for-event entry belonging to the given session, marking
 * each as cancelled (fResEvents = UINT32_MAX, which the waiter translates
 * into VERR_INTERRUPTED).
 *
 * @returns VBox status code (always VINF_SUCCESS).
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waits should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            /* UINT32_MAX is the "cancelled" marker the waiter checks for. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
    Assert(rc == 0);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* The actual signalling must happen outside the spinlock. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1417
1418/**
1419 * Checks if the VMM request is allowed in the context of the given session.
1420 *
1421 * @returns VINF_SUCCESS or VERR_PERMISSION_DENIED.
1422 * @param pSession The calling session.
1423 * @param enmType The request type.
1424 * @param pReqHdr The request.
1425 */
1426static int VBoxGuestCheckIfVMMReqAllowed(PVBOXGUESTSESSION pSession, VMMDevRequestType enmType,
1427 VMMDevRequestHeader const *pReqHdr)
1428{
1429 /*
1430 * Categorize the request being made.
1431 */
1432 /** @todo This need quite some more work! */
1433 enum
1434 {
1435 kLevel_Invalid, kLevel_NoOne, kLevel_OnlyVBoxGuest, kLevel_OnlyKernel, kLevel_TrustedUsers, kLevel_AllUsers
1436 } enmRequired;
1437 switch (enmType)
1438 {
1439 /*
1440 * Deny access to anything we don't know or provide specialized I/O controls for.
1441 */
1442#ifdef VBOX_WITH_HGCM
1443 case VMMDevReq_HGCMConnect:
1444 case VMMDevReq_HGCMDisconnect:
1445# ifdef VBOX_WITH_64_BITS_GUESTS
1446 case VMMDevReq_HGCMCall32:
1447 case VMMDevReq_HGCMCall64:
1448# else
1449 case VMMDevReq_HGCMCall:
1450# endif /* VBOX_WITH_64_BITS_GUESTS */
1451 case VMMDevReq_HGCMCancel:
1452 case VMMDevReq_HGCMCancel2:
1453#endif /* VBOX_WITH_HGCM */
1454 default:
1455 enmRequired = kLevel_NoOne;
1456 break;
1457
1458 /*
1459 * There are a few things only this driver can do (and it doesn't use
1460 * the VMMRequst I/O control route anyway, but whatever).
1461 */
1462 case VMMDevReq_ReportGuestInfo:
1463 case VMMDevReq_ReportGuestInfo2:
1464 case VMMDevReq_GetHypervisorInfo:
1465 case VMMDevReq_SetHypervisorInfo:
1466 case VMMDevReq_RegisterPatchMemory:
1467 case VMMDevReq_DeregisterPatchMemory:
1468 case VMMDevReq_GetMemBalloonChangeRequest:
1469 enmRequired = kLevel_OnlyVBoxGuest;
1470 break;
1471
1472 /*
1473 * Trusted users apps only.
1474 */
1475 case VMMDevReq_QueryCredentials:
1476 case VMMDevReq_ReportCredentialsJudgement:
1477 case VMMDevReq_RegisterSharedModule:
1478 case VMMDevReq_UnregisterSharedModule:
1479 case VMMDevReq_WriteCoreDump:
1480 case VMMDevReq_GetCpuHotPlugRequest:
1481 case VMMDevReq_SetCpuHotPlugStatus:
1482 case VMMDevReq_CheckSharedModules:
1483 case VMMDevReq_GetPageSharingStatus:
1484 case VMMDevReq_DebugIsPageShared:
1485 case VMMDevReq_ReportGuestStats:
1486 case VMMDevReq_GetStatisticsChangeRequest:
1487 case VMMDevReq_ChangeMemBalloon:
1488 enmRequired = kLevel_TrustedUsers;
1489 break;
1490
1491 /*
1492 * Anyone.
1493 */
1494 case VMMDevReq_GetMouseStatus:
1495 case VMMDevReq_SetMouseStatus:
1496 case VMMDevReq_SetPointerShape:
1497 case VMMDevReq_GetHostVersion:
1498 case VMMDevReq_Idle:
1499 case VMMDevReq_GetHostTime:
1500 case VMMDevReq_SetPowerStatus:
1501 case VMMDevReq_AcknowledgeEvents:
1502 case VMMDevReq_CtlGuestFilterMask:
1503 case VMMDevReq_ReportGuestStatus:
1504 case VMMDevReq_GetDisplayChangeRequest:
1505 case VMMDevReq_VideoModeSupported:
1506 case VMMDevReq_GetHeightReduction:
1507 case VMMDevReq_GetDisplayChangeRequest2:
1508 case VMMDevReq_SetGuestCapabilities:
1509 case VMMDevReq_VideoModeSupported2:
1510 case VMMDevReq_VideoAccelEnable:
1511 case VMMDevReq_VideoAccelFlush:
1512 case VMMDevReq_VideoSetVisibleRegion:
1513 case VMMDevReq_GetSeamlessChangeRequest:
1514 case VMMDevReq_GetVRDPChangeRequest:
1515 case VMMDevReq_LogString:
1516 case VMMDevReq_GetSessionId:
1517 enmRequired = kLevel_AllUsers;
1518 break;
1519
1520 /*
1521 * Depends on the request parameters...
1522 */
1523 /** @todo this have to be changed into an I/O control and the facilities
1524 * tracked in the session so they can automatically be failed when the
1525 * session terminates without reporting the new status.
1526 *
1527 * The information presented by IGuest is not reliable without this! */
1528 case VMMDevReq_ReportGuestCapabilities:
1529 switch (((VMMDevReportGuestStatus const *)pReqHdr)->guestStatus.facility)
1530 {
1531 case VBoxGuestFacilityType_All:
1532 case VBoxGuestFacilityType_VBoxGuestDriver:
1533 enmRequired = kLevel_OnlyVBoxGuest;
1534 break;
1535 case VBoxGuestFacilityType_VBoxService:
1536 enmRequired = kLevel_TrustedUsers;
1537 break;
1538 case VBoxGuestFacilityType_VBoxTrayClient:
1539 case VBoxGuestFacilityType_Seamless:
1540 case VBoxGuestFacilityType_Graphics:
1541 default:
1542 enmRequired = kLevel_AllUsers;
1543 break;
1544 }
1545 break;
1546 }
1547
1548 /*
1549 * Check against the session.
1550 */
1551 switch (enmRequired)
1552 {
1553 default:
1554 case kLevel_NoOne:
1555 break;
1556 case kLevel_OnlyVBoxGuest:
1557 case kLevel_OnlyKernel:
1558 if (pSession->R0Process == NIL_RTR0PROCESS)
1559 return VINF_SUCCESS;
1560 break;
1561 case kLevel_TrustedUsers:
1562 case kLevel_AllUsers:
1563 return VINF_SUCCESS;
1564 }
1565
1566 return VERR_PERMISSION_DENIED;
1567}
1568
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST.
 *
 * Validates the request header, checks the session's permission to make the
 * request, copies it onto the physical heap, performs it via VbglGRPerform
 * and copies the result back into the caller's buffer.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The request to perform (input and output).
 * @param   cbData              Size of the buffer holding the request.
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /* Enforce the per-session permission policy for this request type. */
    rc = VBoxGuestCheckIfVMMReqAllowed(pSession, enmType, pReqHdr);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: Operation not allowed! type=%#x rc=%Rrc\n", enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the VMMDev rejected the request: propagate
           the device's status code. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1652
1653
1654static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1655{
1656 VMMDevCtlGuestFilterMask *pReq;
1657 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1658 if (RT_FAILURE(rc))
1659 {
1660 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1661 sizeof(*pReq), sizeof(*pReq), rc));
1662 return rc;
1663 }
1664
1665 pReq->u32OrMask = pInfo->u32OrMask;
1666 pReq->u32NotMask = pInfo->u32NotMask;
1667 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1668 rc = VbglGRPerform(&pReq->header);
1669 if (RT_FAILURE(rc))
1670 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1671
1672 VbglGRFree(&pReq->header);
1673 return rc;
1674}
1675
1676#ifdef VBOX_WITH_HGCM
1677
1678AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1679
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks the calling thread until the host marks the HGCM request done
 * (VBOX_HGCM_REQ_DONE) or the wait times out / is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Timeout in milliseconds, RT_INDEFINITE_WAIT for none.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquire(pDevExt->EventSpinlock);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* Out of memory: back off briefly, then poll the done flag again. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* NOTE(review): on VERR_SEM_DESTROYED pWait is not freed; presumably
       acceptable since this only happens at driver unload — verify. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1748
1749
1750/**
1751 * This is a callback for dealing with async waits.
1752 *
1753 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1754 */
1755static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1756{
1757 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1758 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1759 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1760 pDevExt,
1761 false /* fInterruptible */,
1762 u32User /* cMillies */);
1763}
1764
1765
1766/**
1767 * This is a callback for dealing with async waits with a timeout.
1768 *
1769 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1770 */
1771static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1772 void *pvUser, uint32_t u32User)
1773{
1774 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1775 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1776 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1777 pDevExt,
1778 true /* fInterruptible */,
1779 u32User /* cMillies */ );
1780
1781}
1782
1783
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to the requested HGCM service and, on success, records the new
 * client id in the session's client id table so the connection can be torn
 * down automatically when the session closes.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The connection request (input and output).
 * @param   pcbDataReturned     Where to store the amount of returned data.
 *                              Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSpinlockAcquire(pDevExt->SessionSpinlock);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect and fail (rate-limited log). */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1837
1838
1839static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1840 size_t *pcbDataReturned)
1841{
1842 /*
1843 * Validate the client id and invalidate its entry while we're in the call.
1844 */
1845 int rc;
1846 const uint32_t u32ClientId = pInfo->u32ClientID;
1847 unsigned i;
1848 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1849 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1850 if (pSession->aHGCMClientIds[i] == u32ClientId)
1851 {
1852 pSession->aHGCMClientIds[i] = UINT32_MAX;
1853 break;
1854 }
1855 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1856 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1857 {
1858 static unsigned s_cErrors = 0;
1859 if (s_cErrors++ > 32)
1860 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1861 return VERR_INVALID_HANDLE;
1862 }
1863
1864 /*
1865 * The VbglHGCMConnect call will invoke the callback if the HGCM
1866 * call is performed in an ASYNC fashion. The function is not able
1867 * to deal with cancelled requests.
1868 */
1869 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1870 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1871 if (RT_SUCCESS(rc))
1872 {
1873 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1874 if (pcbDataReturned)
1875 *pcbDataReturned = sizeof(*pInfo);
1876 }
1877
1878 /* Update the client id array according to the result. */
1879 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1880 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1881 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1882 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1883
1884 return rc;
1885}
1886
1887
1888static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1889 PVBOXGUESTSESSION pSession,
1890 VBoxGuestHGCMCallInfo *pInfo,
1891 uint32_t cMillies, bool fInterruptible, bool f32bit, bool fUserData,
1892 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1893{
1894 const uint32_t u32ClientId = pInfo->u32ClientID;
1895 uint32_t fFlags;
1896 size_t cbActual;
1897 unsigned i;
1898 int rc;
1899
1900 /*
1901 * Some more validations.
1902 */
1903 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1904 {
1905 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1906 return VERR_INVALID_PARAMETER;
1907 }
1908
1909 cbActual = cbExtra + sizeof(*pInfo);
1910#ifdef RT_ARCH_AMD64
1911 if (f32bit)
1912 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1913 else
1914#endif
1915 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1916 if (cbData < cbActual)
1917 {
1918 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1919 cbData, cbActual));
1920 return VERR_INVALID_PARAMETER;
1921 }
1922
1923 /*
1924 * Validate the client id.
1925 */
1926 RTSpinlockAcquire(pDevExt->SessionSpinlock);
1927 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1928 if (pSession->aHGCMClientIds[i] == u32ClientId)
1929 break;
1930 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
1931 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1932 {
1933 static unsigned s_cErrors = 0;
1934 if (s_cErrors++ > 32)
1935 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1936 return VERR_INVALID_HANDLE;
1937 }
1938
1939 /*
1940 * The VbglHGCMCall call will invoke the callback if the HGCM
1941 * call is performed in an ASYNC fashion. This function can
1942 * deal with cancelled requests, so we let user more requests
1943 * be interruptible (should add a flag for this later I guess).
1944 */
1945 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1946 fFlags = !fUserData && pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1947#ifdef RT_ARCH_AMD64
1948 if (f32bit)
1949 {
1950 if (fInterruptible)
1951 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1952 else
1953 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1954 }
1955 else
1956#endif
1957 {
1958 if (fInterruptible)
1959 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1960 else
1961 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1962 }
1963 if (RT_SUCCESS(rc))
1964 {
1965 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1966 if (pcbDataReturned)
1967 *pcbDataReturned = cbActual;
1968 }
1969 else
1970 {
1971 if ( rc != VERR_INTERRUPTED
1972 && rc != VERR_TIMEOUT)
1973 {
1974 static unsigned s_cErrors = 0;
1975 if (s_cErrors++ < 32)
1976 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1977 }
1978 else
1979 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1980 }
1981 return rc;
1982}
1983
1984
1985#endif /* VBOX_WITH_HGCM */
1986
1987/**
1988 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1989 *
1990 * Ask the host for the size of the balloon and try to set it accordingly. If
1991 * this approach fails because it's not supported, return with fHandleInR3 set
1992 * and let the user land supply memory we can lock via the other ioctl.
1993 *
1994 * @returns VBox status code.
1995 *
1996 * @param pDevExt The device extension.
1997 * @param pSession The session.
1998 * @param pInfo The output buffer.
1999 * @param pcbDataReturned Where to store the amount of returned data. Can
2000 * be NULL.
2001 */
2002static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2003 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
2004{
2005 VMMDevGetMemBalloonChangeRequest *pReq;
2006 int rc;
2007
2008 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
2009 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2010 AssertRCReturn(rc, rc);
2011
2012 /*
2013 * The first user trying to query/change the balloon becomes the
2014 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2015 */
2016 if ( pDevExt->MemBalloon.pOwner != pSession
2017 && pDevExt->MemBalloon.pOwner == NULL)
2018 pDevExt->MemBalloon.pOwner = pSession;
2019
2020 if (pDevExt->MemBalloon.pOwner == pSession)
2021 {
2022 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
2023 if (RT_SUCCESS(rc))
2024 {
2025 /*
2026 * This is a response to that event. Setting this bit means that
2027 * we request the value from the host and change the guest memory
2028 * balloon according to this value.
2029 */
2030 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
2031 rc = VbglGRPerform(&pReq->header);
2032 if (RT_SUCCESS(rc))
2033 {
2034 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
2035 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
2036
2037 pInfo->cBalloonChunks = pReq->cBalloonChunks;
2038 pInfo->fHandleInR3 = false;
2039
2040 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
2041 /* Ignore various out of memory failures. */
2042 if ( rc == VERR_NO_MEMORY
2043 || rc == VERR_NO_PHYS_MEMORY
2044 || rc == VERR_NO_CONT_MEMORY)
2045 rc = VINF_SUCCESS;
2046
2047 if (pcbDataReturned)
2048 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
2049 }
2050 else
2051 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
2052 VbglGRFree(&pReq->header);
2053 }
2054 }
2055 else
2056 rc = VERR_PERMISSION_DENIED;
2057
2058 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2059 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
2060 return rc;
2061}
2062
2063
2064/**
2065 * Handle a request for changing the memory balloon.
2066 *
2067 * @returns VBox status code.
2068 *
2069 * @param pDevExt The device extention.
2070 * @param pSession The session.
2071 * @param pInfo The change request structure (input).
2072 * @param pcbDataReturned Where to store the amount of returned data. Can
2073 * be NULL.
2074 */
2075static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2076 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
2077{
2078 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
2079 AssertRCReturn(rc, rc);
2080
2081 if (!pDevExt->MemBalloon.fUseKernelAPI)
2082 {
2083 /*
2084 * The first user trying to query/change the balloon becomes the
2085 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
2086 */
2087 if ( pDevExt->MemBalloon.pOwner != pSession
2088 && pDevExt->MemBalloon.pOwner == NULL)
2089 pDevExt->MemBalloon.pOwner = pSession;
2090
2091 if (pDevExt->MemBalloon.pOwner == pSession)
2092 {
2093 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
2094 if (pcbDataReturned)
2095 *pcbDataReturned = 0;
2096 }
2097 else
2098 rc = VERR_PERMISSION_DENIED;
2099 }
2100 else
2101 rc = VERR_PERMISSION_DENIED;
2102
2103 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2104 return rc;
2105}
2106
2107
2108/**
2109 * Handle a request for writing a core dump of the guest on the host.
2110 *
2111 * @returns VBox status code.
2112 *
2113 * @param pDevExt The device extension.
2114 * @param pInfo The output buffer.
2115 */
2116static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
2117{
2118 VMMDevReqWriteCoreDump *pReq = NULL;
2119 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
2120 if (RT_FAILURE(rc))
2121 {
2122 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
2123 sizeof(*pReq), sizeof(*pReq), rc));
2124 return rc;
2125 }
2126
2127 pReq->fFlags = pInfo->fFlags;
2128 rc = VbglGRPerform(&pReq->header);
2129 if (RT_FAILURE(rc))
2130 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
2131
2132 VbglGRFree(&pReq->header);
2133 return rc;
2134}
2135
2136
2137#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2138/**
2139 * Enables the VRDP session and saves its session ID.
2140 *
2141 * @returns VBox status code.
2142 *
2143 * @param pDevExt The device extention.
2144 * @param pSession The session.
2145 */
2146static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2147{
2148 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2149 return VERR_NOT_IMPLEMENTED;
2150}
2151
2152
2153/**
2154 * Disables the VRDP session.
2155 *
2156 * @returns VBox status code.
2157 *
2158 * @param pDevExt The device extention.
2159 * @param pSession The session.
2160 */
2161static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2162{
2163 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2164 return VERR_NOT_IMPLEMENTED;
2165}
2166#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2167
#ifdef DEBUG
/** Unit test SetMouseStatus instead of really executing the request.
 * When true, vboxguestcommonSetMouseStatus records the status and returns
 * g_test_SetMouseStatusGRRC instead of performing the guest request. */
static bool g_test_fSetMouseStatus = false;
/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
static int g_test_SetMouseStatusGRRC;
/** When unit testing SetMouseStatus this will be set to the status passed to
 * the GR. */
static uint32_t g_test_statusSetMouseStatus;
#endif
2177
/**
 * Reports the given mouse feature mask to the host.
 *
 * @returns VBox status code from the guest request (or the canned test RC
 *          when the DEBUG unit-test hook is active).
 * @param   fFeatures   Bitmap of mouse features to report to the host.
 */
static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
{
    VMMDevReqMouseStatus *pReq;
    int rc;

    LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    {
        pReq->mouseFeatures = fFeatures;
        /* The pointer position fields are simply zeroed for this request. */
        pReq->pointerXPos = 0;
        pReq->pointerYPos = 0;
#ifdef DEBUG
        if (g_test_fSetMouseStatus)
        {
            /* Unit-test hook: capture the status that would be sent and
               return the prearranged result instead of calling the host. */
            g_test_statusSetMouseStatus = pReq->mouseFeatures;
            rc = g_test_SetMouseStatusGRRC;
        }
        else
#endif
            rc = VbglGRPerform(&pReq->header);
        VbglGRFree(&pReq->header);
    }
    LogRelFlowFunc(("rc=%Rrc\n", rc));
    return rc;
}
2204
2205
2206/**
2207 * Sets the mouse status features for this session and updates them
2208 * globally. We aim to ensure that if several threads call this in
2209 * parallel the most recent status will always end up being set.
2210 *
2211 * @returns VBox status code.
2212 *
2213 * @param pDevExt The device extention.
2214 * @param pSession The session.
2215 * @param fFeatures New bitmap of enabled features.
2216 */
static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
{
    uint32_t fNewDevExtStatus = 0;
    unsigned i;
    int rc;
    /* Exit early if nothing has changed - hack to work around the
     * Windows Additions not using the common code. */
    bool fNoAction;

    RTSpinlockAcquire(pDevExt->SessionSpinlock);

    /* Each guest-mask feature bit is reference counted across sessions in
       acMouseFeatureUsage; the new global status is the set of bits whose
       usage count is non-zero after applying this session's delta. */
    for (i = 0; i < sizeof(fFeatures) * 8; i++)
    {
        if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
        {
            if (   (RT_BIT_32(i) & fFeatures)
                && !(RT_BIT_32(i) & pSession->fMouseStatus))
                pDevExt->acMouseFeatureUsage[i]++;
            else if (   !(RT_BIT_32(i) & fFeatures)
                     && (RT_BIT_32(i) & pSession->fMouseStatus))
                pDevExt->acMouseFeatureUsage[i]--;
        }
        if (pDevExt->acMouseFeatureUsage[i] > 0)
            fNewDevExtStatus |= RT_BIT_32(i);
    }

    /* Only bits in the guest mask are tracked per session. */
    pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
    fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
    pDevExt->fMouseStatus = fNewDevExtStatus;

    RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock);
    if (fNoAction)
        return VINF_SUCCESS;

    /* Report to the host outside the spinlock.  If another thread changed
       pDevExt->fMouseStatus while we were talking to the host, retry so the
       most recent status always ends up being the one the host sees. */
    do
    {
        fNewDevExtStatus = pDevExt->fMouseStatus;
        rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
    } while (   RT_SUCCESS(rc)
             && fNewDevExtStatus != pDevExt->fMouseStatus);

    return rc;
}
2260
2261
2262#ifdef DEBUG
2263/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2264 * the code in question it probably makes most sense to keep it next to the
2265 * code. */
2266static void testSetMouseStatus(void)
2267{
2268 uint32_t u32Data;
2269 int rc;
2270 RTSPINLOCK Spinlock;
2271
2272 g_test_fSetMouseStatus = true;
2273 rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxGuestTest");
2274 AssertRCReturnVoid(rc);
2275 {
2276 VBOXGUESTDEVEXT DevExt = { 0 };
2277 VBOXGUESTSESSION Session = { 0 };
2278
2279 g_test_statusSetMouseStatus = ~0;
2280 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2281 DevExt.SessionSpinlock = Spinlock;
2282 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2283 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2284 &Session, &u32Data, sizeof(u32Data), NULL);
2285 AssertRCSuccess(rc);
2286 AssertMsg( g_test_statusSetMouseStatus
2287 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2288 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2289 DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2290 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2291 &Session, &u32Data, sizeof(u32Data), NULL);
2292 AssertRCSuccess(rc);
2293 AssertMsg( g_test_statusSetMouseStatus
2294 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2295 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2296 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2297 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2298 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2299 &Session, &u32Data, sizeof(u32Data), NULL);
2300 AssertRCSuccess(rc);
2301 AssertMsg( g_test_statusSetMouseStatus
2302 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2303 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2304 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2305 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2306 &Session, &u32Data, sizeof(u32Data), NULL);
2307 AssertRCSuccess(rc);
2308 AssertMsg( g_test_statusSetMouseStatus
2309 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2310 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2311 u32Data = 0;
2312 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2313 &Session, &u32Data, sizeof(u32Data), NULL);
2314 AssertRCSuccess(rc);
2315 AssertMsg( g_test_statusSetMouseStatus
2316 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2317 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2318 AssertMsg(DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2319 ("Actual value: %d\n", DevExt.acMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2320 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2321 /* This should succeed as the host request should not be made
2322 * since nothing has changed. */
2323 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2324 &Session, &u32Data, sizeof(u32Data), NULL);
2325 AssertRCSuccess(rc);
2326 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2327 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2328 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2329 &Session, &u32Data, sizeof(u32Data), NULL);
2330 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2331 /* Untested paths: out of memory; race setting status to host */
2332 }
2333 RTSpinlockDestroy(Spinlock);
2334 g_test_fSetMouseStatus = false;
2335}
2336#endif
2337
2338
2339/**
2340 * Guest backdoor logging.
2341 *
2342 * @returns VBox status code.
2343 *
2344 * @param pDevExt The device extension.
2345 * @param pch The log message (need not be NULL terminated).
2346 * @param cbData Size of the buffer.
2347 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2348 */
2349static int VBoxGuestCommonIOCtl_Log(PVBOXGUESTDEVEXT pDevExt, const char *pch, size_t cbData, size_t *pcbDataReturned)
2350{
2351 NOREF(pch);
2352 NOREF(cbData);
2353 if (pDevExt->fLoggingEnabled)
2354 RTLogBackdoorPrintf("%.*s", cbData, pch);
2355 else
2356 Log(("%.*s", cbData, pch));
2357 if (pcbDataReturned)
2358 *pcbDataReturned = 0;
2359 return VINF_SUCCESS;
2360}
2361
2362
2363/**
2364 * Common IOCtl for user to kernel and kernel to kernel communication.
2365 *
2366 * This function only does the basic validation and then invokes
2367 * worker functions that takes care of each specific function.
2368 *
2369 * @returns VBox status code.
2370 *
2371 * @param iFunction The requested function.
2372 * @param pDevExt The device extension.
2373 * @param pSession The client session.
2374 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2375 * @param cbData The max size of the data buffer.
2376 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2377 */
2378int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2379 void *pvData, size_t cbData, size_t *pcbDataReturned)
2380{
2381 int rc;
2382 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2383 iFunction, pDevExt, pSession, pvData, cbData));
2384
2385 /*
2386 * Make sure the returned data size is set to zero.
2387 */
2388 if (pcbDataReturned)
2389 *pcbDataReturned = 0;
2390
2391 /*
2392 * Define some helper macros to simplify validation.
2393 */
2394#define CHECKRET_RING0(mnemonic) \
2395 do { \
2396 if (pSession->R0Process != NIL_RTR0PROCESS) \
2397 { \
2398 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2399 pSession->Process, (uintptr_t)pSession->R0Process)); \
2400 return VERR_PERMISSION_DENIED; \
2401 } \
2402 } while (0)
2403#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2404 do { \
2405 if (cbData < (cbMin)) \
2406 { \
2407 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2408 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2409 return VERR_BUFFER_OVERFLOW; \
2410 } \
2411 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2412 { \
2413 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2414 return VERR_INVALID_POINTER; \
2415 } \
2416 } while (0)
2417#define CHECKRET_SIZE(mnemonic, cb) \
2418 do { \
2419 if (cbData != (cb)) \
2420 { \
2421 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2422 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2423 return VERR_BUFFER_OVERFLOW; \
2424 } \
2425 if ((cb) != 0 && !VALID_PTR(pvData)) \
2426 { \
2427 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2428 return VERR_INVALID_POINTER; \
2429 } \
2430 } while (0)
2431
2432
2433 /*
2434 * Deal with variably sized requests first.
2435 */
2436 rc = VINF_SUCCESS;
2437 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2438 {
2439 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2440 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2441 }
2442#ifdef VBOX_WITH_HGCM
2443 /*
2444 * These ones are a bit tricky.
2445 */
2446 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2447 {
2448 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2449 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2450 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2451 fInterruptible, false /*f32bit*/, false /* fUserData */,
2452 0, cbData, pcbDataReturned);
2453 }
2454 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2455 {
2456 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2457 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2458 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2459 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2460 false /*f32bit*/, false /* fUserData */,
2461 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2462 }
2463 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_USERDATA(0)))
2464 {
2465 bool fInterruptible = true;
2466 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2467 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2468 fInterruptible, false /*f32bit*/, true /* fUserData */,
2469 0, cbData, pcbDataReturned);
2470 }
2471# ifdef RT_ARCH_AMD64
2472 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2473 {
2474 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2475 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2476 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2477 fInterruptible, true /*f32bit*/, false /* fUserData */,
2478 0, cbData, pcbDataReturned);
2479 }
2480 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2481 {
2482 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2483 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2484 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2485 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2486 true /*f32bit*/, false /* fUserData */,
2487 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2488 }
2489# endif
2490#endif /* VBOX_WITH_HGCM */
2491 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2492 {
2493 CHECKRET_MIN_SIZE("LOG", 1);
2494 rc = VBoxGuestCommonIOCtl_Log(pDevExt, (char *)pvData, cbData, pcbDataReturned);
2495 }
2496 else
2497 {
2498 switch (iFunction)
2499 {
2500 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2501 CHECKRET_RING0("GETVMMDEVPORT");
2502 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2503 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2504 break;
2505
2506 case VBOXGUEST_IOCTL_WAITEVENT:
2507 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2508 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2509 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2510 break;
2511
2512 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2513 if (cbData != 0)
2514 rc = VERR_INVALID_PARAMETER;
2515 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2516 break;
2517
2518 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2519 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2520 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2521 break;
2522
2523#ifdef VBOX_WITH_HGCM
2524 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2525# ifdef RT_ARCH_AMD64
2526 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2527# endif
2528 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2529 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2530 break;
2531
2532 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2533# ifdef RT_ARCH_AMD64
2534 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2535# endif
2536 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2537 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2538 break;
2539#endif /* VBOX_WITH_HGCM */
2540
2541 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2542 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2543 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2544 break;
2545
2546 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2547 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2548 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2549 break;
2550
2551 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2552 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2553 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2554 break;
2555
2556#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2557 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2558 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2559 break;
2560
2561 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2562 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2563 break;
2564#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2565 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2566 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2567 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2568 *(uint32_t *)pvData);
2569 break;
2570
2571 default:
2572 {
2573 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2574 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2575 rc = VERR_NOT_SUPPORTED;
2576 break;
2577 }
2578 }
2579 }
2580
2581 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2582 return rc;
2583}
2584
2585
2586
2587/**
2588 * Common interrupt service routine.
2589 *
2590 * This deals with events and with waking up thread waiting for those events.
2591 *
2592 * @returns true if it was our interrupt, false if it wasn't.
2593 * @param pDevExt The VBoxGuest device extension.
2594 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     */
    RTSpinlockAcquire(pDevExt->EventSpinlock);
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowlegde events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         * Instead the pre-allocated ack request is posted directly through
         * the VMMDev request port; the host fills in pReq->events/rc.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events    = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t        fEvents = pReq->events;
            PVBOXGUESTWAIT  pWait;
            PVBOXGUESTWAIT  pSafe;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it and handle it after the spinlock is released.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host marked done is woken (or queued
             * for deferred wake-up).
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
                {
                    if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        RTListNodeRemove(&pWait->ListNode);
# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                        RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
# else
                        RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                        rc |= RTSemEventMultiSignal(pWait->Event);
# endif
                    }
                }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.  Each event bit is consumed by
             * the first waiter that requested it; leftovers are stored back
             * into f32PendingEvents.
             */
            fEvents |= pDevExt->f32PendingEvents;
            RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
            {
                if (   (pWait->fReqEvents & fEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
                    RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
                    RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
                    rc |= RTSemEventMultiSignal(pWait->Event);
#endif
                    if (!fEvents)
                        break;
                }
            }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock);

#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
    /*
     * Do wake-ups.
     * Note. On Windows this isn't possible at this IRQL, so a DPC will take
     * care of it.
     */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * (Do this outside the spinlock to prevent some recursive spinlocking.)
     */
    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
2720
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette