VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 38926

Last change on this file since 38926 was 38926, checked in by vboxsync, 13 years ago

Additions/common/VBoxGuest: fixed the mouse status testcase

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 90.5 KB
Line 
1/* $Id: VBoxGuest.cpp 38926 2011-09-30 21:09:25Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43#ifdef RT_OS_WINDOWS
44# ifndef CTL_CODE
45# include <Windows.h>
46# endif
47#endif
48#if defined(RT_OS_SOLARIS)
49# include <iprt/rand.h>
50#endif
51
52
53/*******************************************************************************
54* Internal Functions *
55*******************************************************************************/
56#ifdef VBOX_WITH_HGCM
57static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
58#endif
59#ifdef DEBUG
60static void testSetMouseStatus(void);
61#endif
62static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures);
63
64
65/*******************************************************************************
66* Global Variables *
67*******************************************************************************/
68static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
69
#if defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on Solaris.
 */
PFNRT g_apfnVBoxGuestIPRTDeps[] =
{
    /* VirtioNet */
    (PFNRT)RTRandBytes,
    NULL
};
#endif /* RT_OS_SOLARIS */
82
83
84/**
85 * Reserves memory in which the VMM can relocate any guest mappings
86 * that are floating around.
87 *
88 * This operation is a little bit tricky since the VMM might not accept
89 * just any address because of address clashes between the three contexts
90 * it operates in, so use a small stack to perform this operation.
91 *
92 * @returns VBox status code (ignored).
93 * @param pDevExt The device extension.
94 */
95static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
96{
97 /*
98 * Query the required space.
99 */
100 VMMDevReqHypervisorInfo *pReq;
101 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
102 if (RT_FAILURE(rc))
103 return rc;
104 pReq->hypervisorStart = 0;
105 pReq->hypervisorSize = 0;
106 rc = VbglGRPerform(&pReq->header);
107 if (RT_FAILURE(rc)) /* this shouldn't happen! */
108 {
109 VbglGRFree(&pReq->header);
110 return rc;
111 }
112
113 /*
114 * The VMM will report back if there is nothing it wants to map, like for
115 * instance in VT-x and AMD-V mode.
116 */
117 if (pReq->hypervisorSize == 0)
118 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
119 else
120 {
121 /*
122 * We have to try several times since the host can be picky
123 * about certain addresses.
124 */
125 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
126 uint32_t cbHypervisor = pReq->hypervisorSize;
127 RTR0MEMOBJ ahTries[5];
128 uint32_t iTry;
129 bool fBitched = false;
130 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
131 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
132 {
133 /*
134 * Reserve space, or if that isn't supported, create a object for
135 * some fictive physical memory and map that in to kernel space.
136 *
137 * To make the code a bit uglier, most systems cannot help with
138 * 4MB alignment, so we have to deal with that in addition to
139 * having two ways of getting the memory.
140 */
141 uint32_t uAlignment = _4M;
142 RTR0MEMOBJ hObj;
143 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
144 if (rc == VERR_NOT_SUPPORTED)
145 {
146 uAlignment = PAGE_SIZE;
147 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
148 }
149 /*
150 * If both RTR0MemObjReserveKernel calls above failed because either not supported or
151 * not implemented at all at the current platform, try to map the memory object into the
152 * virtual kernel space.
153 */
154 if (rc == VERR_NOT_SUPPORTED)
155 {
156 if (hFictive == NIL_RTR0MEMOBJ)
157 {
158 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
159 if (RT_FAILURE(rc))
160 break;
161 hFictive = hObj;
162 }
163 uAlignment = _4M;
164 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
165 if (rc == VERR_NOT_SUPPORTED)
166 {
167 uAlignment = PAGE_SIZE;
168 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
169 }
170 }
171 if (RT_FAILURE(rc))
172 {
173 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
174 rc, cbHypervisor, uAlignment, iTry));
175 fBitched = true;
176 break;
177 }
178
179 /*
180 * Try set it.
181 */
182 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
183 pReq->header.rc = VERR_INTERNAL_ERROR;
184 pReq->hypervisorSize = cbHypervisor;
185 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
186 if ( uAlignment == PAGE_SIZE
187 && pReq->hypervisorStart & (_4M - 1))
188 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
189 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
190
191 rc = VbglGRPerform(&pReq->header);
192 if (RT_SUCCESS(rc))
193 {
194 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
195 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
196 RTR0MemObjAddress(pDevExt->hGuestMappings),
197 RTR0MemObjSize(pDevExt->hGuestMappings),
198 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
199 break;
200 }
201 ahTries[iTry] = hObj;
202 }
203
204 /*
205 * Cleanup failed attempts.
206 */
207 while (iTry-- > 0)
208 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
209 if ( RT_FAILURE(rc)
210 && hFictive != NIL_RTR0PTR)
211 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
212 if (RT_FAILURE(rc) && !fBitched)
213 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
214 }
215 VbglGRFree(&pReq->header);
216
217 /*
218 * We ignore failed attempts for now.
219 */
220 return VINF_SUCCESS;
221}
222
223
224/**
225 * Undo what vboxGuestInitFixateGuestMappings did.
226 *
227 * @param pDevExt The device extension.
228 */
229static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
230{
231 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
232 {
233 /*
234 * Tell the host that we're going to free the memory we reserved for
235 * it, the free it up. (Leak the memory if anything goes wrong here.)
236 */
237 VMMDevReqHypervisorInfo *pReq;
238 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
239 if (RT_SUCCESS(rc))
240 {
241 pReq->hypervisorStart = 0;
242 pReq->hypervisorSize = 0;
243 rc = VbglGRPerform(&pReq->header);
244 VbglGRFree(&pReq->header);
245 }
246 if (RT_SUCCESS(rc))
247 {
248 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
249 AssertRC(rc);
250 }
251 else
252 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
253
254 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
255 }
256}
257
258
259/**
260 * Sets the interrupt filter mask during initialization and termination.
261 *
262 * This will ASSUME that we're the ones in carge over the mask, so
263 * we'll simply clear all bits we don't set.
264 *
265 * @returns VBox status code (ignored).
266 * @param pDevExt The device extension.
267 * @param fMask The new mask.
268 */
269static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
270{
271 VMMDevCtlGuestFilterMask *pReq;
272 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
273 if (RT_SUCCESS(rc))
274 {
275 pReq->u32OrMask = fMask;
276 pReq->u32NotMask = ~fMask;
277 rc = VbglGRPerform(&pReq->header);
278 if (RT_FAILURE(rc))
279 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
280 VbglGRFree(&pReq->header);
281 }
282 return rc;
283}
284
285
286/**
287 * Inflate the balloon by one chunk represented by an R0 memory object.
288 *
289 * The caller owns the balloon mutex.
290 *
291 * @returns IPRT status code.
292 * @param pMemObj Pointer to the R0 memory object.
293 * @param pReq The pre-allocated request for performing the VMMDev call.
294 */
295static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
296{
297 uint32_t iPage;
298 int rc;
299
300 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
301 {
302 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
303 pReq->aPhysPage[iPage] = phys;
304 }
305
306 pReq->fInflate = true;
307 pReq->header.size = cbChangeMemBalloonReq;
308 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
309
310 rc = VbglGRPerform(&pReq->header);
311 if (RT_FAILURE(rc))
312 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
313 return rc;
314}
315
316
317/**
318 * Deflate the balloon by one chunk - info the host and free the memory object.
319 *
320 * The caller owns the balloon mutex.
321 *
322 * @returns IPRT status code.
323 * @param pMemObj Pointer to the R0 memory object.
324 * The memory object will be freed afterwards.
325 * @param pReq The pre-allocated request for performing the VMMDev call.
326 */
327static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
328{
329 uint32_t iPage;
330 int rc;
331
332 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
333 {
334 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
335 pReq->aPhysPage[iPage] = phys;
336 }
337
338 pReq->fInflate = false;
339 pReq->header.size = cbChangeMemBalloonReq;
340 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
341
342 rc = VbglGRPerform(&pReq->header);
343 if (RT_FAILURE(rc))
344 {
345 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
346 return rc;
347 }
348
349 rc = RTR0MemObjFree(*pMemObj, true);
350 if (RT_FAILURE(rc))
351 {
352 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
353 return rc;
354 }
355
356 *pMemObj = NIL_RTR0MEMOBJ;
357 return VINF_SUCCESS;
358}
359
360
361/**
362 * Inflate/deflate the memory balloon and notify the host.
363 *
364 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
365 * the mutex.
366 *
367 * @returns VBox status code.
368 * @param pDevExt The device extension.
369 * @param pSession The session.
370 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
371 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
372 * (VINF_SUCCESS if set).
373 */
374static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
375{
376 int rc = VINF_SUCCESS;
377
378 if (pDevExt->MemBalloon.fUseKernelAPI)
379 {
380 VMMDevChangeMemBalloon *pReq;
381 uint32_t i;
382
383 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
384 {
385 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
386 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
387 return VERR_INVALID_PARAMETER;
388 }
389
390 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
391 return VINF_SUCCESS; /* nothing to do */
392
393 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
394 && !pDevExt->MemBalloon.paMemObj)
395 {
396 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
397 if (!pDevExt->MemBalloon.paMemObj)
398 {
399 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
400 return VERR_NO_MEMORY;
401 }
402 }
403
404 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
405 if (RT_FAILURE(rc))
406 return rc;
407
408 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
409 {
410 /* inflate */
411 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
412 {
413 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
414 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
415 if (RT_FAILURE(rc))
416 {
417 if (rc == VERR_NOT_SUPPORTED)
418 {
419 /* not supported -- fall back to the R3-allocated memory. */
420 rc = VINF_SUCCESS;
421 pDevExt->MemBalloon.fUseKernelAPI = false;
422 Assert(pDevExt->MemBalloon.cChunks == 0);
423 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
424 }
425 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
426 * cannot allocate more memory => don't try further, just stop here */
427 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
428 break;
429 }
430
431 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
432 if (RT_FAILURE(rc))
433 {
434 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
435 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
436 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
437 break;
438 }
439 pDevExt->MemBalloon.cChunks++;
440 }
441 }
442 else
443 {
444 /* deflate */
445 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
446 {
447 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
448 if (RT_FAILURE(rc))
449 {
450 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
451 break;
452 }
453 pDevExt->MemBalloon.cChunks--;
454 }
455 }
456
457 VbglGRFree(&pReq->header);
458 }
459
460 /*
461 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
462 * the balloon changes via the other API.
463 */
464 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
465
466 return rc;
467}
468
469
470/**
471 * Helper to reinit the VBoxVMM communication after hibernation.
472 *
473 * @returns VBox status code.
474 * @param pDevExt The device extension.
475 * @param enmOSType The OS type.
476 */
477int VBoxGuestReinitDevExtAfterHibernation(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
478{
479 int rc = VBoxGuestReportGuestInfo(enmOSType);
480 if (RT_SUCCESS(rc))
481 {
482 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
483 if (RT_FAILURE(rc))
484 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest driver status, rc=%Rrc\n", rc));
485 }
486 else
487 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: could not report guest information to host, rc=%Rrc\n", rc));
488 Log(("VBoxGuest::VBoxGuestReinitDevExtAfterHibernation: returned with rc=%Rrc\n", rc));
489 return rc;
490}
491
492
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse to grow past the host-reported maximum, or if the balloon
           was never queried so cMaxChunks is still zero. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the array tracking one R0 memory object per chunk. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Deflating an empty balloon is a caller bug. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 chunk into memory before handing it to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Host rejected the chunk: unlock it and forget it again. */
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
609
610
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a NULL session at driver unload) may tear
       down the balloon. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                /* Deflate chunk by chunk; stop (and leak the rest) on the
                   first failure. */
                uint32_t i;
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
658
659
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents: HGCM events must always be delivered.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    RTListInit(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    RTListInit(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    RTListInit(&pDevExt->WakeUpList);
#endif
    RTListInit(&pDevExt->WokenUpList);
    RTListInit(&pDevExt->FreeList);
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* EventSpinlock may have been created even if SessionSpinlock failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            /* The interrupt handler needs the physical address of the
               pre-allocated acknowledge request. */
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

#ifdef DEBUG
                        testSetMouseStatus(); /* Other tests? */
#endif

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure path: undo the mutex/spinlocks created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
828
829
830/**
831 * Deletes all the items in a wait chain.
832 * @param pList The head of the chain.
833 */
834static void VBoxGuestDeleteWaitList(PRTLISTNODE pList)
835{
836 while (!RTListIsEmpty(pList))
837 {
838 int rc2;
839 PVBOXGUESTWAIT pWait = RTListGetFirst(pList, VBOXGUESTWAIT, ListNode);
840 RTListNodeRemove(&pWait->ListNode);
841
842 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
843 pWait->Event = NIL_RTSEMEVENTMULTI;
844 pWait->pSession = NULL;
845 RTMemFree(pWait);
846 }
847}
848
849
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this when the driver is being unloaded,
 * but not on (system) shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    VBoxGuestDeleteWaitList(&pDevExt->WakeUpList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->WokenUpList);
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
896
897
898/**
899 * Creates a VBoxGuest user session.
900 *
901 * The native code calls this when a ring-3 client opens the device.
902 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
903 *
904 * @returns VBox status code.
905 * @param pDevExt The device extension.
906 * @param ppSession Where to store the session on success.
907 */
908int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
909{
910 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
911 if (RT_UNLIKELY(!pSession))
912 {
913 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
914 return VERR_NO_MEMORY;
915 }
916
917 pSession->Process = RTProcSelf();
918 pSession->R0Process = RTR0ProcHandleSelf();
919 pSession->pDevExt = pDevExt;
920
921 *ppSession = pSession;
922 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
923 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
924 return VINF_SUCCESS;
925}
926
927
928/**
929 * Creates a VBoxGuest kernel session.
930 *
931 * The native code calls this when a ring-0 client connects to the device.
932 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
933 *
934 * @returns VBox status code.
935 * @param pDevExt The device extension.
936 * @param ppSession Where to store the session on success.
937 */
938int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
939{
940 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
941 if (RT_UNLIKELY(!pSession))
942 {
943 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
944 return VERR_NO_MEMORY;
945 }
946
947 pSession->Process = NIL_RTPROCESS;
948 pSession->R0Process = NIL_RTR0PROCESS;
949 pSession->pDevExt = pDevExt;
950
951 *ppSession = pSession;
952 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
953 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
954 return VINF_SUCCESS;
955}
956
957
958
/**
 * Closes a VBoxGuest session.
 *
 * Disconnects any HGCM clients the session still holds, cleans up the
 * session's part of the memory balloon, resets its mouse status flags
 * and frees the session structure.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM client id the session still has registered. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    /* Reset any mouse status flags which the session may have set. */
    VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession, 0);
    RTMemFree(pSession);
}
992
993
/**
 * Allocates a wait-for-event entry.
 *
 * Recycles an entry from the device extension's free list when possible,
 * falling back on RTMemAlloc + RTSemEventMultiCreate when the list is empty.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
    if (pWait)
    {
        /* The unlocked peek above is only a hint; re-check and unlink under
           the spinlock since another thread may race us to the entry. */
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = RTListGetFirst(&pDevExt->FreeList, VBOXGUESTWAIT, ListNode);
        if (pWait)
            RTListNodeRemove(&pWait->ListNode);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Throttle the release log so an allocation shortage cannot flood it. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }

        /* Freshly allocated entries are not linked anywhere yet. */
        pWait->ListNode.pNext = NULL;
        pWait->ListNode.pPrev = NULL;
    }

    /*
     * Zero members just as an precaution.
     */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    pWait->fPendingWakeUp = false;
    pWait->fFreeMe = false;
#endif
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1060
1061
/**
 * Frees the wait-for-event entry.
 *
 * The caller must own the wait spinlock !
 * The entry must be in a list!
 *
 * Moves the entry from its current list onto the free list, unless a
 * deferred wake-up is still in flight, in which case the actual move is
 * left to VBoxGuestWaitDoWakeUps via the fFreeMe flag.
 *
 * @param   pDevExt         The device extension.
 * @param   pWait           The wait-for-event entry to free.
 */
static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
{
    /* Scrub the request/result state so a recycled entry starts clean. */
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    Assert(!pWait->fFreeMe);
    if (pWait->fPendingWakeUp)
        /* The wake-up thread has the entry in hand with the spinlock dropped
           (see VBoxGuestWaitDoWakeUps); let it complete the free for us. */
        pWait->fFreeMe = true;
    else
#endif
    {
        RTListNodeRemove(&pWait->ListNode);
        RTListAppend(&pDevExt->FreeList, &pWait->ListNode);
    }
}
1089
1090
1091/**
1092 * Frees the wait-for-event entry.
1093 *
1094 * @param pDevExt The device extension.
1095 * @param pWait The wait-for-event entry to free.
1096 */
1097static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1098{
1099 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1100 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1101 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1102 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1103}
1104
1105
1106#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
/**
 * Processes the wake-up list.
 *
 * All entries in the wake-up list gets signalled and moved to the woken-up
 * list.
 *
 * @param   pDevExt         The device extension.
 */
void VBoxGuestWaitDoWakeUps(PVBOXGUESTDEVEXT pDevExt)
{
    /* Unlocked emptiness check first; the loop below re-checks under the lock. */
    if (!RTListIsEmpty(&pDevExt->WakeUpList))
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        for (;;)
        {
            int rc;
            PVBOXGUESTWAIT pWait = RTListGetFirst(&pDevExt->WakeUpList, VBOXGUESTWAIT, ListNode);
            if (!pWait)
                break;
            /* Mark the entry busy and drop the spinlock across the signal call;
               the fFreeMe handling below copes with the waiter freeing the
               entry while the lock is released. */
            pWait->fPendingWakeUp = true;
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

            rc = RTSemEventMultiSignal(pWait->Event);
            AssertRC(rc);

            RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
            pWait->fPendingWakeUp = false;
            if (!pWait->fFreeMe)
            {
                RTListNodeRemove(&pWait->ListNode);
                RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
            }
            else
            {
                /* The waiter tried to free the entry while we had the lock
                   dropped (see VBoxGuestWaitFreeLocked); finish the job. */
                pWait->fFreeMe = false;
                VBoxGuestWaitFreeLocked(pDevExt, pWait);
            }
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
}
1149#endif /* VBOXGUEST_USE_DEFERRED_WAKE_UP */
1150
1151
1152/**
1153 * Modifies the guest capabilities.
1154 *
1155 * Should be called during driver init and termination.
1156 *
1157 * @returns VBox status code.
1158 * @param fOr The Or mask (what to enable).
1159 * @param fNot The Not mask (what to disable).
1160 */
1161int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1162{
1163 VMMDevReqGuestCapabilities2 *pReq;
1164 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1165 if (RT_FAILURE(rc))
1166 {
1167 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1168 sizeof(*pReq), sizeof(*pReq), rc));
1169 return rc;
1170 }
1171
1172 pReq->u32OrMask = fOr;
1173 pReq->u32NotMask = fNot;
1174
1175 rc = VbglGRPerform(&pReq->header);
1176 if (RT_FAILURE(rc))
1177 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1178
1179 VbglGRFree(&pReq->header);
1180 return rc;
1181}
1182
1183
1184/**
1185 * Implements the fast (no input or output) type of IOCtls.
1186 *
1187 * This is currently just a placeholder stub inherited from the support driver code.
1188 *
1189 * @returns VBox status code.
1190 * @param iFunction The IOCtl function number.
1191 * @param pDevExt The device extension.
1192 * @param pSession The session.
1193 */
1194int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1195{
1196 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1197
1198 NOREF(iFunction);
1199 NOREF(pDevExt);
1200 NOREF(pSession);
1201 return VERR_NOT_SUPPORTED;
1202}
1203
1204
1205/**
1206 * Return the VMM device port.
1207 *
1208 * returns IPRT status code.
1209 * @param pDevExt The device extension.
1210 * @param pInfo The request info.
1211 * @param pcbDataReturned (out) contains the number of bytes to return.
1212 */
1213static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1214{
1215 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1216 pInfo->portAddress = pDevExt->IOPortBase;
1217 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1218 if (pcbDataReturned)
1219 *pcbDataReturned = sizeof(*pInfo);
1220 return VINF_SUCCESS;
1221}
1222
1223
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 *
 * The caller enters the spinlock, we leave it.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 *          VERR_TIMEOUT when no requested event is pending (spinlock also left).
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait info structure; flags/result written on a hit.
 * @param   iEvent      Bit number of the first requested event (for logging).
 * @param   fReqEvents  The mask of requested events.
 * @param   pTmp        The caller's spinlock state.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    /* Check for, and consume, any pending events matching the request mask. */
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Include the event number in the log when only one event was asked for. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    /* No match: leave the spinlock and tell the caller to wait. */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
    return VERR_TIMEOUT;
}
1251
1252
/**
 * Worker for VBOXGUEST_IOCTL_WAITEVENT.
 *
 * Waits for one of the events in pInfo->u32EventMaskIn to become pending,
 * honouring the timeout in pInfo->u32TimeoutIn (milliseconds, UINT32_MAX
 * meaning wait indefinitely, 0 meaning poll only).
 *
 * @returns VBox status code; the detailed outcome is in pInfo->u32Result.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The wait request (in/out).
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param   fInterruptible  Whether the wait may be interrupted.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    /* Initialize the output early so every error path reports consistently. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;

    /* A zero timeout means poll only: report a timeout right away. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListAppend(&pDevExt->WaitList, &pWait->ListNode);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     *
     * fResEvents == UINT32_MAX is the cancellation marker set by
     * VBoxGuestCommonIOCtl_CancelAllWaitEvents.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT (2)\n"));
    }
    else
    {
        /* A success status without any events signals an internal inconsistency. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1376
1377
/**
 * Worker for VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS.
 *
 * Wakes up every waiter belonging to the given session, marking each wait
 * entry with UINT32_MAX so VBoxGuestCommonIOCtl_WaitEvent reports
 * VERR_INTERRUPTED to the woken thread.
 *
 * @returns VINF_SUCCESS.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session whose waiters should be cancelled.
 */
static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    PVBOXGUESTWAIT pSafe;
    int rc = 0;

    Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));

    /*
     * Walk the event list and wake up anyone with a matching session.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
    {
        if (pWait->pSession == pSession)
        {
            /* UINT32_MAX in fResEvents is the cancellation marker. */
            pWait->fResEvents = UINT32_MAX;
            RTListNodeRemove(&pWait->ListNode);
#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
            RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
#else
            /* Accumulate the signal statuses; all are expected to be 0 (asserted below). */
            rc |= RTSemEventMultiSignal(pWait->Event);
            RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
#endif
        }
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    Assert(rc == 0);

#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
    /* The actual signalling happens outside the spinlock. */
    VBoxGuestWaitDoWakeUps(pDevExt);
#endif

    return VINF_SUCCESS;
}
1414
1415
/**
 * Worker for VBOXGUEST_IOCTL_VMMREQUEST.
 *
 * Validates the VMMDev request embedded in the ioctl buffer, copies it into
 * a block from the physical memory heap, performs it, and copies the result
 * back into the caller's buffer.
 *
 * @returns VBox status code (on failure this also reflects the VMMDev
 *          request status from pReqCopy->rc).
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pReqHdr             The request header (in/out buffer).
 * @param   cbData              Size of the buffer pReqHdr points to.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly host-updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1492
1493
1494static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1495{
1496 VMMDevCtlGuestFilterMask *pReq;
1497 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1498 if (RT_FAILURE(rc))
1499 {
1500 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1501 sizeof(*pReq), sizeof(*pReq), rc));
1502 return rc;
1503 }
1504
1505 pReq->u32OrMask = pInfo->u32OrMask;
1506 pReq->u32NotMask = pInfo->u32NotMask;
1507 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1508 rc = VbglGRPerform(&pReq->header);
1509 if (RT_FAILURE(rc))
1510 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1511
1512 VbglGRFree(&pReq->header);
1513 return rc;
1514}
1515
1516#ifdef VBOX_WITH_HGCM
1517
AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* The HGCM code below passes RT_INDEFINITE_WAIT through uint32_t cMillies parameters. */
1519
/** Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks until the host marks the HGCM request done (VBOX_HGCM_REQ_DONE in
 * pHdr->fu32Flags), the timeout expires, or - when @a fInterruptible - the
 * wait is interrupted.
 *
 * @returns VINF_SUCCESS when the request completed; otherwise the wait
 *          status (e.g. VERR_TIMEOUT, VERR_INTERRUPTED, VERR_SEM_DESTROYED).
 * @param   pHdr            The HGCM request header (updated by the host).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        The timeout in milliseconds.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);   /* Out of wait entries: back off briefly and retry. */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    RTListAppend(&pDevExt->HGCMWaitList, &pWait->ListNode);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (   RT_FAILURE(rc)
        && rc != VERR_TIMEOUT
        && (   !fInterruptible
            || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
    return rc;
}
1589
1590
1591/**
1592 * This is a callback for dealing with async waits.
1593 *
1594 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1595 */
1596static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1597{
1598 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1599 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1600 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1601 pDevExt,
1602 false /* fInterruptible */,
1603 u32User /* cMillies */);
1604}
1605
1606
/**
 * This is a callback for dealing with async waits that can be interrupted.
 *
 * (Both callback flavours honour the u32User timeout; this one additionally
 * allows the wait to be interrupted.)
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
                                                                     void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                true /* fInterruptible */,
                                                u32User /* cMillies */ );

}
1623
1624
/**
 * Worker for VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service and records the new client id in the session's
 * table so it is disconnected automatically in VBoxGuestCloseSession.
 *
 * @returns VBox status code.
 * @param   pDevExt             The device extension.
 * @param   pSession            The calling session.
 * @param   pInfo               The connection info (in/out).
 * @param   pcbDataReturned     Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* Table full: undo the connect so the client doesn't leak. */
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1679
1680
1681static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1682 size_t *pcbDataReturned)
1683{
1684 /*
1685 * Validate the client id and invalidate its entry while we're in the call.
1686 */
1687 int rc;
1688 const uint32_t u32ClientId = pInfo->u32ClientID;
1689 unsigned i;
1690 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1691 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1692 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1693 if (pSession->aHGCMClientIds[i] == u32ClientId)
1694 {
1695 pSession->aHGCMClientIds[i] = UINT32_MAX;
1696 break;
1697 }
1698 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1699 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1700 {
1701 static unsigned s_cErrors = 0;
1702 if (s_cErrors++ > 32)
1703 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1704 return VERR_INVALID_HANDLE;
1705 }
1706
1707 /*
1708 * The VbglHGCMConnect call will invoke the callback if the HGCM
1709 * call is performed in an ASYNC fashion. The function is not able
1710 * to deal with cancelled requests.
1711 */
1712 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1713 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1714 if (RT_SUCCESS(rc))
1715 {
1716 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1717 if (pcbDataReturned)
1718 *pcbDataReturned = sizeof(*pInfo);
1719 }
1720
1721 /* Update the client id array according to the result. */
1722 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1723 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1724 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1725 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1726
1727 return rc;
1728}
1729
1730
1731static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1732 PVBOXGUESTSESSION pSession,
1733 VBoxGuestHGCMCallInfo *pInfo,
1734 uint32_t cMillies, bool fInterruptible, bool f32bit,
1735 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1736{
1737 const uint32_t u32ClientId = pInfo->u32ClientID;
1738 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1739 uint32_t fFlags;
1740 size_t cbActual;
1741 unsigned i;
1742 int rc;
1743
1744 /*
1745 * Some more validations.
1746 */
1747 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1748 {
1749 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1750 return VERR_INVALID_PARAMETER;
1751 }
1752
1753 cbActual = cbExtra + sizeof(*pInfo);
1754#ifdef RT_ARCH_AMD64
1755 if (f32bit)
1756 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1757 else
1758#endif
1759 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1760 if (cbData < cbActual)
1761 {
1762 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1763 cbData, cbActual));
1764 return VERR_INVALID_PARAMETER;
1765 }
1766
1767 /*
1768 * Validate the client id.
1769 */
1770 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1771 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1772 if (pSession->aHGCMClientIds[i] == u32ClientId)
1773 break;
1774 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1775 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1776 {
1777 static unsigned s_cErrors = 0;
1778 if (s_cErrors++ > 32)
1779 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1780 return VERR_INVALID_HANDLE;
1781 }
1782
1783 /*
1784 * The VbglHGCMCall call will invoke the callback if the HGCM
1785 * call is performed in an ASYNC fashion. This function can
1786 * deal with cancelled requests, so we let user more requests
1787 * be interruptible (should add a flag for this later I guess).
1788 */
1789 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1790 fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1791#ifdef RT_ARCH_AMD64
1792 if (f32bit)
1793 {
1794 if (fInterruptible)
1795 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1796 else
1797 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1798 }
1799 else
1800#endif
1801 {
1802 if (fInterruptible)
1803 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1804 else
1805 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1806 }
1807 if (RT_SUCCESS(rc))
1808 {
1809 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1810 if (pcbDataReturned)
1811 *pcbDataReturned = cbActual;
1812 }
1813 else
1814 {
1815 if ( rc != VERR_INTERRUPTED
1816 && rc != VERR_TIMEOUT)
1817 {
1818 static unsigned s_cErrors = 0;
1819 if (s_cErrors++ < 32)
1820 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1821 }
1822 else
1823 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1824 }
1825 return rc;
1826}
1827
1828
1829#endif /* VBOX_WITH_HGCM */
1830
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly. If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt             The device extension.
 * @param   pSession            The session.
 * @param   pInfo               The output buffer.
 * @param   pcbDataReturned     Where to store the amount of returned data. Can
 *                              be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The max chunk count is fixed for the VM's lifetime once known. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3    = false;

                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
1906
1907
1908/**
1909 * Handle a request for changing the memory balloon.
1910 *
1911 * @returns VBox status code.
1912 *
1913 * @param pDevExt The device extention.
1914 * @param pSession The session.
1915 * @param pInfo The change request structure (input).
1916 * @param pcbDataReturned Where to store the amount of returned data. Can
1917 * be NULL.
1918 */
1919static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1920 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1921{
1922 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1923 AssertRCReturn(rc, rc);
1924
1925 if (!pDevExt->MemBalloon.fUseKernelAPI)
1926 {
1927 /*
1928 * The first user trying to query/change the balloon becomes the
1929 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1930 */
1931 if ( pDevExt->MemBalloon.pOwner != pSession
1932 && pDevExt->MemBalloon.pOwner == NULL)
1933 pDevExt->MemBalloon.pOwner = pSession;
1934
1935 if (pDevExt->MemBalloon.pOwner == pSession)
1936 {
1937 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, !!pInfo->fInflate);
1938 if (pcbDataReturned)
1939 *pcbDataReturned = 0;
1940 }
1941 else
1942 rc = VERR_PERMISSION_DENIED;
1943 }
1944 else
1945 rc = VERR_PERMISSION_DENIED;
1946
1947 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1948 return rc;
1949}
1950
1951
1952/**
1953 * Handle a request for writing a core dump of the guest on the host.
1954 *
1955 * @returns VBox status code.
1956 *
1957 * @param pDevExt The device extension.
1958 * @param pInfo The output buffer.
1959 */
1960static int VBoxGuestCommonIOCtl_WriteCoreDump(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWriteCoreDump *pInfo)
1961{
1962 VMMDevReqWriteCoreDump *pReq = NULL;
1963 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_WriteCoreDump);
1964 if (RT_FAILURE(rc))
1965 {
1966 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1967 sizeof(*pReq), sizeof(*pReq), rc));
1968 return rc;
1969 }
1970
1971 pReq->fFlags = pInfo->fFlags;
1972 rc = VbglGRPerform(&pReq->header);
1973 if (RT_FAILURE(rc))
1974 Log(("VBoxGuestCommonIOCtl: WRITE_CORE_DUMP: VbglGRPerform failed, rc=%Rrc!\n", rc));
1975
1976 VbglGRFree(&pReq->header);
1977 return rc;
1978}
1979
1980
1981#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
1982/**
1983 * Enables the VRDP session and saves its session ID.
1984 *
1985 * @returns VBox status code.
1986 *
1987 * @param pDevExt The device extention.
1988 * @param pSession The session.
1989 */
1990static int VBoxGuestCommonIOCtl_EnableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1991{
1992 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
1993 return VERR_NOT_IMPLEMENTED;
1994}
1995
1996
1997/**
1998 * Disables the VRDP session.
1999 *
2000 * @returns VBox status code.
2001 *
2002 * @param pDevExt The device extention.
2003 * @param pSession The session.
2004 */
2005static int VBoxGuestCommonIOCtl_DisableVRDPSession(VBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
2006{
2007 /* Nothing to do here right now, since this only is supported on Windows at the moment. */
2008 return VERR_NOT_IMPLEMENTED;
2009}
2010#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2011
#ifdef DEBUG
/** Unit test hook: when true, vboxguestcommonSetMouseStatus captures the
 * status instead of really executing the host (GR) request. */
static bool g_test_fSetMouseStatus = false;
/** When unit testing SetMouseStatus, the fake RC for the GR to return. */
static int g_test_SetMouseStatusGRRC;
/** When unit testing SetMouseStatus this will be set to the status passed to
 * the GR (i.e. what would have been sent to the host). */
static uint32_t g_test_statusSetMouseStatus;
#endif
2021
/**
 * Reports the guest mouse feature mask to the host via a
 * VMMDevReq_SetMouseStatus general request.
 *
 * @returns VBox status code (allocation or GR failure), or the canned test
 *          RC when the DEBUG unit-test hook is armed.
 * @param   fFeatures   Bitmap of VMMDEV_MOUSE_* feature flags to report.
 */
static int vboxguestcommonSetMouseStatus(uint32_t fFeatures)
{
    VMMDevReqMouseStatus *pReq;
    int rc;

    LogRelFlowFunc(("fFeatures=%u\n", (int) fFeatures));
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetMouseStatus);
    if (RT_SUCCESS(rc))
    {
        /* Only the feature mask is meaningful for this request; the
         * position fields are unused on the way to the host. */
        pReq->mouseFeatures = fFeatures;
        pReq->pointerXPos = 0;
        pReq->pointerYPos = 0;
#ifdef DEBUG
        /* Unit-test mode: record what would have been sent and return the
         * pre-set fake RC instead of talking to the host. */
        if (g_test_fSetMouseStatus)
        {
            g_test_statusSetMouseStatus = pReq->mouseFeatures;
            rc = g_test_SetMouseStatusGRRC;
        }
        else
#endif
            rc = VbglGRPerform(&pReq->header);
        VbglGRFree(&pReq->header);
    }
    LogRelFlowFunc(("rc=%Rrc\n", rc));
    return rc;
}
2048
2049
2050/**
2051 * Sets the mouse status features for this session and updates them
2052 * globally. We aim to ensure that if several threads call this in
2053 * parallel the most recent status will always end up being set.
2054 *
2055 * @returns VBox status code.
2056 *
2057 * @param pDevExt The device extention.
2058 * @param pSession The session.
2059 * @param fFeatures New bitmap of enabled features.
2060 */
2061static int VBoxGuestCommonIOCtl_SetMouseStatus(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t fFeatures)
2062{
2063 unsigned i;
2064 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2065 uint32_t fNewDevExtStatus = 0;
2066 int rc;
2067 /* Exit early if nothing has changed - hack to work around the
2068 * Windows Additions not using the common code. */
2069 bool fNoAction;
2070
2071 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
2072 for (i = 0; i < sizeof(fFeatures) * 8; ++i)
2073 {
2074 if (RT_BIT_32(i) & VMMDEV_MOUSE_GUEST_MASK)
2075 {
2076 if ( (RT_BIT_32(i) & fFeatures)
2077 && !(RT_BIT_32(i) & pSession->fMouseStatus))
2078 ++pDevExt->cMouseFeatureUsage[i];
2079 else if ( !(RT_BIT_32(i) & fFeatures)
2080 && (RT_BIT_32(i) & pSession->fMouseStatus))
2081 --pDevExt->cMouseFeatureUsage[i];
2082 }
2083 if (pDevExt->cMouseFeatureUsage[i] > 0)
2084 fNewDevExtStatus |= RT_BIT_32(i);
2085 }
2086 pSession->fMouseStatus = fFeatures & VMMDEV_MOUSE_GUEST_MASK;
2087 fNoAction = (pDevExt->fMouseStatus == fNewDevExtStatus);
2088 pDevExt->fMouseStatus = fNewDevExtStatus;
2089 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
2090 if (fNoAction)
2091 return VINF_SUCCESS;
2092 do
2093 {
2094 fNewDevExtStatus = pDevExt->fMouseStatus;
2095 rc = vboxguestcommonSetMouseStatus(fNewDevExtStatus);
2096 } while(RT_SUCCESS(rc) && fNewDevExtStatus != pDevExt->fMouseStatus);
2097 return rc;
2098}
2099
2100
2101#ifdef DEBUG
2102/** Unit test for the SET_MOUSE_STATUS IoCtl. Since this is closely tied to
2103 * the code in question it probably makes most sense to keep it next to the
2104 * code. */
2105static void testSetMouseStatus(void)
2106{
2107 int cFailures = 0;
2108 uint32_t u32Data;
2109 int rc;
2110 RTSPINLOCK Spinlock;
2111
2112 g_test_fSetMouseStatus = true;
2113 rc = RTSpinlockCreate(&Spinlock);
2114 AssertRCReturnVoid(rc);
2115 {
2116 VBOXGUESTDEVEXT DevExt = { 0 };
2117 VBOXGUESTSESSION Session = { 0 };
2118
2119 g_test_statusSetMouseStatus = ~0;
2120 g_test_SetMouseStatusGRRC = VINF_SUCCESS;
2121 DevExt.SessionSpinlock = Spinlock;
2122 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2123 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2124 &Session, &u32Data, sizeof(u32Data), NULL);
2125 AssertRCSuccess(rc);
2126 AssertMsg( g_test_statusSetMouseStatus
2127 == VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE,
2128 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2129 DevExt.cMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] = 1;
2130 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2131 &Session, &u32Data, sizeof(u32Data), NULL);
2132 AssertRCSuccess(rc);
2133 AssertMsg( g_test_statusSetMouseStatus
2134 == ( VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE
2135 | VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR),
2136 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2137 u32Data = VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE; /* Can't change this */
2138 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2139 &Session, &u32Data, sizeof(u32Data), NULL);
2140 AssertRCSuccess(rc);
2141 AssertMsg( g_test_statusSetMouseStatus
2142 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2143 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2144 u32Data = VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR;
2145 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2146 &Session, &u32Data, sizeof(u32Data), NULL);
2147 AssertRCSuccess(rc);
2148 AssertMsg( g_test_statusSetMouseStatus
2149 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2150 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2151 u32Data = 0;
2152 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2153 &Session, &u32Data, sizeof(u32Data), NULL);
2154 AssertRCSuccess(rc);
2155 AssertMsg( g_test_statusSetMouseStatus
2156 == VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR,
2157 ("Actual status: 0x%x\n", g_test_statusSetMouseStatus));
2158 AssertMsg(DevExt.cMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR) - 1] == 1,
2159 ("Actual value: %d\n", DevExt.cMouseFeatureUsage[ASMBitFirstSetU32(VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR)]));
2160 g_test_SetMouseStatusGRRC = VERR_UNRESOLVED_ERROR;
2161 /* This should succeed as the host request should not be made
2162 * since nothing has changed. */
2163 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2164 &Session, &u32Data, sizeof(u32Data), NULL);
2165 AssertRCSuccess(rc);
2166 /* This should fail with VERR_UNRESOLVED_ERROR as set above. */
2167 u32Data = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE;
2168 rc = VBoxGuestCommonIOCtl(VBOXGUEST_IOCTL_SET_MOUSE_STATUS, &DevExt,
2169 &Session, &u32Data, sizeof(u32Data), NULL);
2170 AssertMsg(rc == VERR_UNRESOLVED_ERROR, ("rc == %Rrc\n", rc));
2171 /* Untested paths: out of memory; race setting status to host */
2172 }
2173 RTSpinlockDestroy(Spinlock);
2174 g_test_fSetMouseStatus = false;
2175}
2176#endif
2177
2178
2179/**
2180 * Guest backdoor logging.
2181 *
2182 * @returns VBox status code.
2183 *
2184 * @param pch The log message (need not be NULL terminated).
2185 * @param cbData Size of the buffer.
2186 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2187 */
2188static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2189{
2190 NOREF(pch);
2191 NOREF(cbData);
2192 Log(("%.*s", cbData, pch));
2193 if (pcbDataReturned)
2194 *pcbDataReturned = 0;
2195 return VINF_SUCCESS;
2196}
2197
2198
2199/**
2200 * Common IOCtl for user to kernel and kernel to kernel communication.
2201 *
2202 * This function only does the basic validation and then invokes
2203 * worker functions that takes care of each specific function.
2204 *
2205 * @returns VBox status code.
2206 *
2207 * @param iFunction The requested function.
2208 * @param pDevExt The device extension.
2209 * @param pSession The client session.
2210 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2211 * @param cbData The max size of the data buffer.
2212 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2213 */
2214int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2215 void *pvData, size_t cbData, size_t *pcbDataReturned)
2216{
2217 int rc;
2218 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2219 iFunction, pDevExt, pSession, pvData, cbData));
2220
2221 /*
2222 * Make sure the returned data size is set to zero.
2223 */
2224 if (pcbDataReturned)
2225 *pcbDataReturned = 0;
2226
2227 /*
2228 * Define some helper macros to simplify validation.
2229 */
2230#define CHECKRET_RING0(mnemonic) \
2231 do { \
2232 if (pSession->R0Process != NIL_RTR0PROCESS) \
2233 { \
2234 LogFunc((mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2235 pSession->Process, (uintptr_t)pSession->R0Process)); \
2236 return VERR_PERMISSION_DENIED; \
2237 } \
2238 } while (0)
2239#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2240 do { \
2241 if (cbData < (cbMin)) \
2242 { \
2243 LogFunc((mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2244 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2245 return VERR_BUFFER_OVERFLOW; \
2246 } \
2247 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2248 { \
2249 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2250 return VERR_INVALID_POINTER; \
2251 } \
2252 } while (0)
2253#define CHECKRET_SIZE(mnemonic, cb) \
2254 do { \
2255 if (cbData != (cb)) \
2256 { \
2257 LogFunc((mnemonic ": cbData=%#zx (%zu) expected is %#zx (%zu)\n", \
2258 cbData, cbData, (size_t)(cb), (size_t)(cb))); \
2259 return VERR_BUFFER_OVERFLOW; \
2260 } \
2261 if ((cb) != 0 && !VALID_PTR(pvData)) \
2262 { \
2263 LogFunc((mnemonic ": Invalid pointer %p\n", pvData)); \
2264 return VERR_INVALID_POINTER; \
2265 } \
2266 } while (0)
2267
2268
2269 /*
2270 * Deal with variably sized requests first.
2271 */
2272 rc = VINF_SUCCESS;
2273 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2274 {
2275 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2276 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2277 }
2278#ifdef VBOX_WITH_HGCM
2279 /*
2280 * These ones are a bit tricky.
2281 */
2282 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2283 {
2284 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2285 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2286 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2287 fInterruptible, false /*f32bit*/,
2288 0, cbData, pcbDataReturned);
2289 }
2290 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2291 {
2292 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2293 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2294 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2295 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2296 false /*f32bit*/,
2297 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2298 }
2299# ifdef RT_ARCH_AMD64
2300 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2301 {
2302 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2303 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2304 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2305 fInterruptible, true /*f32bit*/,
2306 0, cbData, pcbDataReturned);
2307 }
2308 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2309 {
2310 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2311 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2312 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2313 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2314 true /*f32bit*/,
2315 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2316 }
2317# endif
2318#endif /* VBOX_WITH_HGCM */
2319 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2320 {
2321 CHECKRET_MIN_SIZE("LOG", 1);
2322 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2323 }
2324 else
2325 {
2326 switch (iFunction)
2327 {
2328 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2329 CHECKRET_RING0("GETVMMDEVPORT");
2330 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2331 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2332 break;
2333
2334 case VBOXGUEST_IOCTL_WAITEVENT:
2335 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2336 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2337 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2338 break;
2339
2340 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2341 if (cbData != 0)
2342 rc = VERR_INVALID_PARAMETER;
2343 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2344 break;
2345
2346 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2347 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2348 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2349 break;
2350
2351#ifdef VBOX_WITH_HGCM
2352 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2353# ifdef RT_ARCH_AMD64
2354 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2355# endif
2356 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2357 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2358 break;
2359
2360 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2361# ifdef RT_ARCH_AMD64
2362 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2363# endif
2364 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2365 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2366 break;
2367#endif /* VBOX_WITH_HGCM */
2368
2369 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2370 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2371 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2372 break;
2373
2374 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2375 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2376 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2377 break;
2378
2379 case VBOXGUEST_IOCTL_WRITE_CORE_DUMP:
2380 CHECKRET_MIN_SIZE("WRITE_CORE_DUMP", sizeof(VBoxGuestWriteCoreDump));
2381 rc = VBoxGuestCommonIOCtl_WriteCoreDump(pDevExt, (VBoxGuestWriteCoreDump *)pvData);
2382 break;
2383
2384#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2385 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2386 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2387 break;
2388
2389 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2390 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2391 break;
2392#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2393 case VBOXGUEST_IOCTL_SET_MOUSE_STATUS:
2394 CHECKRET_SIZE("SET_MOUSE_STATUS", sizeof(uint32_t));
2395 rc = VBoxGuestCommonIOCtl_SetMouseStatus(pDevExt, pSession,
2396 *(uint32_t *)pvData);
2397 break;
2398
2399 default:
2400 {
2401 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2402 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2403 rc = VERR_NOT_SUPPORTED;
2404 break;
2405 }
2406 }
2407 }
2408
2409 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2410 return rc;
2411}
2412
2413
2414
2415/**
2416 * Common interrupt service routine.
2417 *
2418 * This deals with events and with waking up thread waiting for those events.
2419 *
2420 * @returns true if it was our interrupt, false if it wasn't.
2421 * @param pDevExt The VBoxGuest device extension.
2422 */
2423bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2424{
2425 bool fMousePositionChanged = false;
2426 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2427 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2428 int rc = 0;
2429 bool fOurIrq;
2430
2431 /*
2432 * Make sure we've initialized the device extension.
2433 */
2434 if (RT_UNLIKELY(!pReq))
2435 return false;
2436
2437 /*
2438 * Enter the spinlock and check if it's our IRQ or not.
2439 */
2440 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
2441 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2442 if (fOurIrq)
2443 {
2444 /*
2445 * Acknowlegde events.
2446 * We don't use VbglGRPerform here as it may take another spinlocks.
2447 */
2448 pReq->header.rc = VERR_INTERNAL_ERROR;
2449 pReq->events = 0;
2450 ASMCompilerBarrier();
2451 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2452 ASMCompilerBarrier(); /* paranoia */
2453 if (RT_SUCCESS(pReq->header.rc))
2454 {
2455 uint32_t fEvents = pReq->events;
2456 PVBOXGUESTWAIT pWait;
2457 PVBOXGUESTWAIT pSafe;
2458
2459 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2460
2461 /*
2462 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2463 */
2464 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2465 {
2466 fMousePositionChanged = true;
2467 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2468 }
2469
2470#ifdef VBOX_WITH_HGCM
2471 /*
2472 * The HGCM event/list is kind of different in that we evaluate all entries.
2473 */
2474 if (fEvents & VMMDEV_EVENT_HGCM)
2475 {
2476 RTListForEachSafe(&pDevExt->HGCMWaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2477 {
2478 if (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE)
2479 {
2480 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2481 RTListNodeRemove(&pWait->ListNode);
2482# ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2483 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2484# else
2485 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2486 rc |= RTSemEventMultiSignal(pWait->Event);
2487# endif
2488 }
2489 }
2490 fEvents &= ~VMMDEV_EVENT_HGCM;
2491 }
2492#endif
2493
2494 /*
2495 * Normal FIFO waiter evaluation.
2496 */
2497 fEvents |= pDevExt->f32PendingEvents;
2498 RTListForEachSafe(&pDevExt->WaitList, pWait, pSafe, VBOXGUESTWAIT, ListNode)
2499 {
2500 if ( (pWait->fReqEvents & fEvents)
2501 && !pWait->fResEvents)
2502 {
2503 pWait->fResEvents = pWait->fReqEvents & fEvents;
2504 fEvents &= ~pWait->fResEvents;
2505 RTListNodeRemove(&pWait->ListNode);
2506#ifdef VBOXGUEST_USE_DEFERRED_WAKE_UP
2507 RTListAppend(&pDevExt->WakeUpList, &pWait->ListNode);
2508#else
2509 RTListAppend(&pDevExt->WokenUpList, &pWait->ListNode);
2510 rc |= RTSemEventMultiSignal(pWait->Event);
2511#endif
2512 if (!fEvents)
2513 break;
2514 }
2515 }
2516 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2517 }
2518 else /* something is serious wrong... */
2519 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2520 pReq->header.rc, pReq->events));
2521 }
2522 else
2523 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2524
2525 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
2526
2527#if defined(VBOXGUEST_USE_DEFERRED_WAKE_UP) && !defined(RT_OS_WINDOWS)
2528 /*
2529 * Do wake-ups.
2530 * Note. On Windows this isn't possible at this IRQL, so a DPC will take
2531 * care of it.
2532 */
2533 VBoxGuestWaitDoWakeUps(pDevExt);
2534#endif
2535
2536 /*
2537 * Work the poll and async notification queues on OSes that implements that.
2538 * (Do this outside the spinlock to prevent some recursive spinlocking.)
2539 */
2540 if (fMousePositionChanged)
2541 {
2542 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2543 VBoxGuestNativeISRMousePollEvent(pDevExt);
2544 }
2545
2546 Assert(rc == 0);
2547 return fOurIrq;
2548}
2549
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette