VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 32011

Last change on this file since 32011 was 31752, checked in by vboxsync, 15 years ago

VBoxGuest.cpp: A shadowed variable (Info), identing.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 81.7 KB
Line 
1/* $Id: VBoxGuest.cpp 31752 2010-08-18 11:25:40Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43
44
45/*******************************************************************************
46* Internal Functions *
47*******************************************************************************/
48#ifdef VBOX_WITH_HGCM
49static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
50#endif
51
52
53/*******************************************************************************
54* Global Variables *
55*******************************************************************************/
56static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
57
58
59
60/**
61 * Reserves memory in which the VMM can relocate any guest mappings
62 * that are floating around.
63 *
64 * This operation is a little bit tricky since the VMM might not accept
65 * just any address because of address clashes between the three contexts
66 * it operates in, so use a small stack to perform this operation.
67 *
68 * @returns VBox status code (ignored).
69 * @param pDevExt The device extension.
70 */
71static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
72{
73 /*
74 * Query the required space.
75 */
76 VMMDevReqHypervisorInfo *pReq;
77 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
78 if (RT_FAILURE(rc))
79 return rc;
80 pReq->hypervisorStart = 0;
81 pReq->hypervisorSize = 0;
82 rc = VbglGRPerform(&pReq->header);
83 if (RT_FAILURE(rc)) /* this shouldn't happen! */
84 {
85 VbglGRFree(&pReq->header);
86 return rc;
87 }
88
89 /*
90 * The VMM will report back if there is nothing it wants to map, like for
91 * insance in VT-x and AMD-V mode.
92 */
93 if (pReq->hypervisorSize == 0)
94 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
95 else
96 {
97 /*
98 * We have to try several times since the host can be picky
99 * about certain addresses.
100 */
101 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
102 uint32_t cbHypervisor = pReq->hypervisorSize;
103 RTR0MEMOBJ ahTries[5];
104 uint32_t iTry;
105 bool fBitched = false;
106 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
107 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
108 {
109 /*
110 * Reserve space, or if that isn't supported, create a object for
111 * some fictive physical memory and map that in to kernel space.
112 *
113 * To make the code a bit uglier, most systems cannot help with
114 * 4MB alignment, so we have to deal with that in addition to
115 * having two ways of getting the memory.
116 */
117 uint32_t uAlignment = _4M;
118 RTR0MEMOBJ hObj;
119 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
120 if (rc == VERR_NOT_SUPPORTED)
121 {
122 uAlignment = PAGE_SIZE;
123 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
124 }
125 if (rc == VERR_NOT_SUPPORTED)
126 {
127 if (hFictive == NIL_RTR0MEMOBJ)
128 {
129 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
130 if (RT_FAILURE(rc))
131 break;
132 hFictive = hObj;
133 }
134 uAlignment = _4M;
135 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
136 if (rc == VERR_NOT_SUPPORTED)
137 {
138 uAlignment = PAGE_SIZE;
139 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
140 }
141 }
142 if (RT_FAILURE(rc))
143 {
144 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
145 rc, cbHypervisor, uAlignment, iTry));
146 fBitched = true;
147 break;
148 }
149
150 /*
151 * Try set it.
152 */
153 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
154 pReq->header.rc = VERR_INTERNAL_ERROR;
155 pReq->hypervisorSize = cbHypervisor;
156 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
157 if ( uAlignment == PAGE_SIZE
158 && pReq->hypervisorStart & (_4M - 1))
159 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
160 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
161
162 rc = VbglGRPerform(&pReq->header);
163 if (RT_SUCCESS(rc))
164 {
165 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
166 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
167 RTR0MemObjAddress(pDevExt->hGuestMappings),
168 RTR0MemObjSize(pDevExt->hGuestMappings),
169 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
170 break;
171 }
172 ahTries[iTry] = hObj;
173 }
174
175 /*
176 * Cleanup failed attempts.
177 */
178 while (iTry-- > 0)
179 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
180 if ( RT_FAILURE(rc)
181 && hFictive != NIL_RTR0PTR)
182 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
183 if (RT_FAILURE(rc) && !fBitched)
184 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
185 }
186 VbglGRFree(&pReq->header);
187
188 /*
189 * We ignore failed attempts for now.
190 */
191 return VINF_SUCCESS;
192}
193
194
195/**
196 * Undo what vboxGuestInitFixateGuestMappings did.
197 *
198 * @param pDevExt The device extension.
199 */
200static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
201{
202 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
203 {
204 /*
205 * Tell the host that we're going to free the memory we reserved for
206 * it, the free it up. (Leak the memory if anything goes wrong here.)
207 */
208 VMMDevReqHypervisorInfo *pReq;
209 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
210 if (RT_SUCCESS(rc))
211 {
212 pReq->hypervisorStart = 0;
213 pReq->hypervisorSize = 0;
214 rc = VbglGRPerform(&pReq->header);
215 VbglGRFree(&pReq->header);
216 }
217 if (RT_SUCCESS(rc))
218 {
219 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
220 AssertRC(rc);
221 }
222 else
223 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
224
225 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
226 }
227}
228
229
230/**
231 * Sets the interrupt filter mask during initialization and termination.
232 *
233 * This will ASSUME that we're the ones in carge over the mask, so
234 * we'll simply clear all bits we don't set.
235 *
236 * @returns VBox status code (ignored).
237 * @param pDevExt The device extension.
238 * @param fMask The new mask.
239 */
240static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
241{
242 VMMDevCtlGuestFilterMask *pReq;
243 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
244 if (RT_SUCCESS(rc))
245 {
246 pReq->u32OrMask = fMask;
247 pReq->u32NotMask = ~fMask;
248 rc = VbglGRPerform(&pReq->header);
249 if (RT_FAILURE(rc))
250 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
251 VbglGRFree(&pReq->header);
252 }
253 return rc;
254}
255
256
257/**
258 * Inflate the balloon by one chunk represented by an R0 memory object.
259 *
260 * The caller owns the balloon mutex.
261 *
262 * @returns IPRT status code.
263 * @param pMemObj Pointer to the R0 memory object.
264 * @param pReq The pre-allocated request for performing the VMMDev call.
265 */
266static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
267{
268 uint32_t iPage;
269 int rc;
270
271 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
272 {
273 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
274 pReq->aPhysPage[iPage] = phys;
275 }
276
277 /* Protect this memory from being accessed. Doesn't work on every platform and probably
278 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
279 * done when object is freed. */
280 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
281
282 pReq->fInflate = true;
283 pReq->header.size = cbChangeMemBalloonReq;
284 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
285
286 rc = VbglGRPerform(&pReq->header);
287 if (RT_FAILURE(rc))
288 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
289 return rc;
290}
291
292
293/**
294 * Deflate the balloon by one chunk - info the host and free the memory object.
295 *
296 * The caller owns the balloon mutex.
297 *
298 * @returns IPRT status code.
299 * @param pMemObj Pointer to the R0 memory object.
300 * The memory object will be freed afterwards.
301 * @param pReq The pre-allocated request for performing the VMMDev call.
302 */
303static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
304{
305 uint32_t iPage;
306 int rc;
307
308 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
309 {
310 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
311 pReq->aPhysPage[iPage] = phys;
312 }
313
314 pReq->fInflate = false;
315 pReq->header.size = cbChangeMemBalloonReq;
316 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
317
318 rc = VbglGRPerform(&pReq->header);
319 if (RT_FAILURE(rc))
320 {
321 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
322 return rc;
323 }
324
325 /* undo previous protec call, ignore rc for reasons stated there. */
326 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
327 /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */
328
329 rc = RTR0MemObjFree(*pMemObj, true);
330 if (RT_FAILURE(rc))
331 {
332 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
333 return rc;
334 }
335
336 *pMemObj = NIL_RTR0MEMOBJ;
337 return VINF_SUCCESS;
338}
339
340
341/**
342 * Inflate/deflate the memory balloon and notify the host.
343 *
344 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
345 * the mutex.
346 *
347 * @returns VBox status code.
348 * @param pDevExt The device extension.
349 * @param pSession The session.
350 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
351 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
352 * (VINF_SUCCESS if set).
353 */
354static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
355{
356 int rc = VINF_SUCCESS;
357
358 if (pDevExt->MemBalloon.fUseKernelAPI)
359 {
360 VMMDevChangeMemBalloon *pReq;
361 uint32_t i;
362
363 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
364 {
365 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
366 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
367 return VERR_INVALID_PARAMETER;
368 }
369
370 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
371 return VINF_SUCCESS; /* nothing to do */
372
373 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
374 && !pDevExt->MemBalloon.paMemObj)
375 {
376 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
377 if (!pDevExt->MemBalloon.paMemObj)
378 {
379 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
380 return VERR_NO_MEMORY;
381 }
382 }
383
384 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
385 if (RT_FAILURE(rc))
386 return rc;
387
388 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
389 {
390 /* inflate */
391 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
392 {
393 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
394 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
395 if (RT_FAILURE(rc))
396 {
397 if (rc == VERR_NOT_SUPPORTED)
398 {
399 /* not supported -- fall back to the R3-allocated memory. */
400 rc = VINF_SUCCESS;
401 pDevExt->MemBalloon.fUseKernelAPI = false;
402 Assert(pDevExt->MemBalloon.cChunks == 0);
403 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
404 }
405 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
406 * cannot allocate more memory => don't try further, just stop here */
407 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
408 break;
409 }
410
411 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
412 if (RT_FAILURE(rc))
413 {
414 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
415 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
416 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
417 break;
418 }
419 pDevExt->MemBalloon.cChunks++;
420 }
421 }
422 else
423 {
424 /* deflate */
425 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
426 {
427 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
428 if (RT_FAILURE(rc))
429 {
430 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
431 break;
432 }
433 pDevExt->MemBalloon.cChunks--;
434 }
435 }
436
437 VbglGRFree(&pReq->header);
438 }
439
440 /*
441 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
442 * the balloon changes via the other API.
443 */
444 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
445
446 return rc;
447}
448
449
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (currently unused by this worker).
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse to grow past the maximum; cMaxChunks == 0 means the balloon
           size was never queried, i.e. we weren't initialized by ring-3 yet. */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the array tracking one R0 memory object per chunk. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * While scanning, also remember the first free slot for an inflate.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 chunk into memory before handing it to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                /* Inflate failed: unlock and release the chunk again. */
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
566
567
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a NULL session at driver unload) may tear
       the balloon down. */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate chunks from the top down; stop (and leak) on the
                   first failure since the host still references those pages. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc.  Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc).  Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
615
616
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        /* Basic sanity: correct structure version and a size that fits in the
           mapping (32 bytes is the minimum plausible structure size). */
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledgement request so the ISR never has to
           allocate; its physical address is handed to the device directly. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

                        /* A failed driver-status report is logged but not fatal. */
                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: unwind the locks created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
781
782
783/**
784 * Deletes all the items in a wait chain.
785 * @param pWait The head of the chain.
786 */
787static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
788{
789 while (pList->pHead)
790 {
791 int rc2;
792 PVBOXGUESTWAIT pWait = pList->pHead;
793 pList->pHead = pWait->pNext;
794
795 pWait->pNext = NULL;
796 pWait->pPrev = NULL;
797 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
798 pWait->Event = NIL_RTSEMEVENTMULTI;
799 pWait->pSession = NULL;
800 RTMemFree(pWait);
801 }
802 pList->pHead = NULL;
803 pList->pTail = NULL;
804}
805
806
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this when the driver is being unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
849
850
851/**
852 * Creates a VBoxGuest user session.
853 *
854 * The native code calls this when a ring-3 client opens the device.
855 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
856 *
857 * @returns VBox status code.
858 * @param pDevExt The device extension.
859 * @param ppSession Where to store the session on success.
860 */
861int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
862{
863 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
864 if (RT_UNLIKELY(!pSession))
865 {
866 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
867 return VERR_NO_MEMORY;
868 }
869
870 pSession->Process = RTProcSelf();
871 pSession->R0Process = RTR0ProcHandleSelf();
872 pSession->pDevExt = pDevExt;
873
874 *ppSession = pSession;
875 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
876 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
877 return VINF_SUCCESS;
878}
879
880
881/**
882 * Creates a VBoxGuest kernel session.
883 *
884 * The native code calls this when a ring-0 client connects to the device.
885 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
886 *
887 * @returns VBox status code.
888 * @param pDevExt The device extension.
889 * @param ppSession Where to store the session on success.
890 */
891int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
892{
893 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
894 if (RT_UNLIKELY(!pSession))
895 {
896 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
897 return VERR_NO_MEMORY;
898 }
899
900 pSession->Process = NIL_RTPROCESS;
901 pSession->R0Process = NIL_RTR0PROCESS;
902 pSession->pDevExt = pDevExt;
903
904 *ppSession = pSession;
905 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
906 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
907 return VINF_SUCCESS;
908}
909
910
911
912/**
913 * Closes a VBoxGuest session.
914 *
915 * @param pDevExt The device extension.
916 * @param pSession The session to close (and free).
917 */
918void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
919{
920 unsigned i; NOREF(i);
921 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
922 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
923
924#ifdef VBOX_WITH_HGCM
925 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
926 if (pSession->aHGCMClientIds[i])
927 {
928 VBoxGuestHGCMDisconnectInfo Info;
929 Info.result = 0;
930 Info.u32ClientID = pSession->aHGCMClientIds[i];
931 pSession->aHGCMClientIds[i] = 0;
932 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
933 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
934 }
935#endif
936
937 pSession->pDevExt = NULL;
938 pSession->Process = NIL_RTPROCESS;
939 pSession->R0Process = NIL_RTR0PROCESS;
940 vboxGuestCloseMemBalloon(pDevExt, pSession);
941 RTMemFree(pSession);
942}
943
944
945/**
946 * Links the wait-for-event entry into the tail of the given list.
947 *
948 * @param pList The list to link it into.
949 * @param pWait The wait for event entry to append.
950 */
951DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
952{
953 const PVBOXGUESTWAIT pTail = pList->pTail;
954 pWait->pNext = NULL;
955 pWait->pPrev = pTail;
956 if (pTail)
957 pTail->pNext = pWait;
958 else
959 pList->pHead = pWait;
960 pList->pTail = pWait;
961}
962
963
964/**
965 * Unlinks the wait-for-event entry.
966 *
967 * @param pList The list to unlink it from.
968 * @param pWait The wait for event entry to unlink.
969 */
970DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
971{
972 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
973 const PVBOXGUESTWAIT pNext = pWait->pNext;
974 if (pNext)
975 pNext->pPrev = pPrev;
976 else
977 pList->pTail = pPrev;
978 if (pPrev)
979 pPrev->pNext = pNext;
980 else
981 pList->pHead = pNext;
982}
983
984
985/**
986 * Allocates a wiat-for-event entry.
987 *
988 * @returns The wait-for-event entry.
989 * @param pDevExt The device extension.
990 * @param pSession The session that's allocating this. Can be NULL.
991 */
992static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
993{
994 /*
995 * Allocate it one way or the other.
996 */
997 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
998 if (pWait)
999 {
1000 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1001 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1002
1003 pWait = pDevExt->FreeList.pTail;
1004 if (pWait)
1005 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
1006
1007 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1008 }
1009 if (!pWait)
1010 {
1011 static unsigned s_cErrors = 0;
1012 int rc;
1013
1014 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1015 if (!pWait)
1016 {
1017 if (s_cErrors++ < 32)
1018 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1019 return NULL;
1020 }
1021
1022 rc = RTSemEventMultiCreate(&pWait->Event);
1023 if (RT_FAILURE(rc))
1024 {
1025 if (s_cErrors++ < 32)
1026 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1027 RTMemFree(pWait);
1028 return NULL;
1029 }
1030 }
1031
1032 /*
1033 * Zero members just as an precaution.
1034 */
1035 pWait->pNext = NULL;
1036 pWait->pPrev = NULL;
1037 pWait->fReqEvents = 0;
1038 pWait->fResEvents = 0;
1039 pWait->pSession = pSession;
1040#ifdef VBOX_WITH_HGCM
1041 pWait->pHGCMReq = NULL;
1042#endif
1043 RTSemEventMultiReset(pWait->Event);
1044 return pWait;
1045}
1046
1047
1048/**
1049 * Frees the wait-for-event entry.
1050 * The caller must own the wait spinlock!
1051 *
1052 * @param pDevExt The device extension.
1053 * @param pWait The wait-for-event entry to free.
1054 */
1055static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1056{
1057 pWait->fReqEvents = 0;
1058 pWait->fResEvents = 0;
1059#ifdef VBOX_WITH_HGCM
1060 pWait->pHGCMReq = NULL;
1061#endif
1062 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1063}
1064
1065
1066/**
1067 * Frees the wait-for-event entry.
1068 *
1069 * @param pDevExt The device extension.
1070 * @param pWait The wait-for-event entry to free.
1071 */
1072static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1073{
1074 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1075 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1076 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1077 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1078}
1079
1080
1081/**
1082 * Modifies the guest capabilities.
1083 *
1084 * Should be called during driver init and termination.
1085 *
1086 * @returns VBox status code.
1087 * @param fOr The Or mask (what to enable).
1088 * @param fNot The Not mask (what to disable).
1089 */
1090int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1091{
1092 VMMDevReqGuestCapabilities2 *pReq;
1093 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1094 if (RT_FAILURE(rc))
1095 {
1096 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1097 sizeof(*pReq), sizeof(*pReq), rc));
1098 return rc;
1099 }
1100
1101 pReq->u32OrMask = fOr;
1102 pReq->u32NotMask = fNot;
1103
1104 rc = VbglGRPerform(&pReq->header);
1105 if (RT_FAILURE(rc))
1106 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1107
1108 VbglGRFree(&pReq->header);
1109 return rc;
1110}
1111
1112
1113/**
1114 * Implements the fast (no input or output) type of IOCtls.
1115 *
1116 * This is currently just a placeholder stub inherited from the support driver code.
1117 *
1118 * @returns VBox status code.
1119 * @param iFunction The IOCtl function number.
1120 * @param pDevExt The device extension.
1121 * @param pSession The session.
1122 */
1123int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1124{
1125 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1126
1127 NOREF(iFunction);
1128 NOREF(pDevExt);
1129 NOREF(pSession);
1130 return VERR_NOT_SUPPORTED;
1131}
1132
1133
1134/**
1135 * Return the VMM device port.
1136 *
1137 * returns IPRT status code.
1138 * @param pDevExt The device extension.
1139 * @param pInfo The request info.
1140 * @param pcbDataReturned (out) contains the number of bytes to return.
1141 */
1142static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1143{
1144 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1145 pInfo->portAddress = pDevExt->IOPortBase;
1146 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1147 if (pcbDataReturned)
1148 *pcbDataReturned = sizeof(*pInfo);
1149 return VINF_SUCCESS;
1150}
1151
1152
1153/**
1154 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1155 * The caller enters the spinlock, we may or may not leave it.
1156 *
1157 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1158 */
1159DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1160 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
1161{
1162 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1163 if (fMatches)
1164 {
1165 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1166 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
1167
1168 pInfo->u32EventFlagsOut = fMatches;
1169 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1170 if (fReqEvents & ~((uint32_t)1 << iEvent))
1171 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1172 else
1173 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1174 return VINF_SUCCESS;
1175 }
1176 return VERR_TIMEOUT;
1177}
1178
1179
/**
 * Handles the WAITEVENT ioctl: waits for any of the requested VMMDev events
 * to be posted, consuming and returning them via pInfo.
 *
 * @returns VBox status code (VERR_TIMEOUT, VERR_INTERRUPTED, ...).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session (recorded in the wait entry).
 * @param   pInfo           The wait request (in) and result (out) structure.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 * @param   fInterruptible  Whether the wait may be interrupted (uses the
 *                          NoResume semaphore wait variant).
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    /* Initialize the output up front so early returns report a clean error state. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    /* iEvent is the index of the first set bit; used only for prettier logging
       of single-event waits. A zero mask is rejected here. */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    /* Note: WaitEventCheckCondition releases the spinlock itself on VINF_SUCCESS. */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll-only: report timeout without sleeping. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* UINT32_MAX in u32TimeoutIn means wait forever. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     */
    /* fResEvents == UINT32_MAX is the cancellation marker set by
       VBoxGuestCommonIOCtl_CancelAllWaitEvents. */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        /* A successful wait with no events recorded should be impossible;
           flag it (rate limited) and report an internal error. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1306
1307
1308static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1309{
1310 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1311#if defined(RT_OS_SOLARIS)
1312 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1313#endif
1314 PVBOXGUESTWAIT pWait;
1315 int rc = 0;
1316
1317 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1318
1319 /*
1320 * Walk the event list and wake up anyone with a matching session.
1321 *
1322 * Note! On Solaris we have to do really ugly stuff here because
1323 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1324 * The hack is racy, but what we can we do... (Eliminate this
1325 * termination hack, perhaps?)
1326 */
1327#if defined(RT_OS_SOLARIS)
1328 RTThreadPreemptDisable(&State);
1329 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1330 do
1331 {
1332 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1333 if ( pWait->pSession == pSession
1334 && pWait->fResEvents != UINT32_MAX)
1335 {
1336 RTSEMEVENTMULTI hEvent = pWait->Event;
1337 pWait->fResEvents = UINT32_MAX;
1338 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1339 /* HACK ALRET! This races wakeup + reuse! */
1340 rc |= RTSemEventMultiSignal(hEvent);
1341 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1342 break;
1343 }
1344 } while (pWait);
1345 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1346 RTThreadPreemptDisable(&State);
1347#else
1348 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1349 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1350 if (pWait->pSession == pSession)
1351 {
1352 pWait->fResEvents = UINT32_MAX;
1353 rc |= RTSemEventMultiSignal(pWait->Event);
1354 }
1355 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1356#endif
1357 Assert(rc == 0);
1358
1359 return VINF_SUCCESS;
1360}
1361
1362
/**
 * Handles the VMMREQUEST ioctl: validates a caller-supplied VMMDev request,
 * copies it into the physical heap, performs it, and copies the result back.
 *
 * @returns VBox status code (the VMMDev result is propagated on failure).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The caller's request buffer (in/out).
 * @param   cbData          Size of the caller's buffer.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /* The declared request size must at least cover the minimum for its type... */
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    /* ...and must not claim more than the caller actually supplied. */
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly updated) request back to the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the device rejected the request; propagate its status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1439
1440
1441static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1442{
1443 VMMDevCtlGuestFilterMask *pReq;
1444 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1445 if (RT_FAILURE(rc))
1446 {
1447 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1448 sizeof(*pReq), sizeof(*pReq), rc));
1449 return rc;
1450 }
1451
1452 pReq->u32OrMask = pInfo->u32OrMask;
1453 pReq->u32NotMask = pInfo->u32NotMask;
1454 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1455 rc = VbglGRPerform(&pReq->header);
1456 if (RT_FAILURE(rc))
1457 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1458
1459 VbglGRFree(&pReq->header);
1460 return rc;
1461}
1462
1463#ifdef VBOX_WITH_HGCM
1464
1465AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1466
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks until the host flags the HGCM request as done (VBOX_HGCM_REQ_DONE)
 * or the wait times out / is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header being waited on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Wait timeout in milliseconds; RT_INDEFINITE_WAIT
 *                          for no timeout.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* back off briefly before retrying the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Semaphore destroyed during driver unload - bail out without touching pWait. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1539
1540
1541/**
1542 * This is a callback for dealing with async waits.
1543 *
1544 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1545 */
1546static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1547{
1548 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1549 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1550 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1551 pDevExt,
1552 false /* fInterruptible */,
1553 u32User /* cMillies */);
1554}
1555
1556
1557/**
1558 * This is a callback for dealing with async waits with a timeout.
1559 *
1560 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1561 */
1562static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1563 void *pvUser, uint32_t u32User)
1564{
1565 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1566 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1567 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1568 pDevExt,
1569 true /* fInterruptible */,
1570 u32User /* cMillies */ );
1571
1572}
1573
1574
/**
 * Handles the HGCM_CONNECT ioctl: connects to an HGCM service and records
 * the resulting client id in the session's client id table.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session the client id is registered with.
 * @param   pInfo           The connect info (in/out).
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            /* Claim the first free slot under the session spinlock. */
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            /* No free slot: roll back the connection so the client id doesn't leak. */
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1629
1630
1631static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1632 size_t *pcbDataReturned)
1633{
1634 /*
1635 * Validate the client id and invalidate its entry while we're in the call.
1636 */
1637 int rc;
1638 const uint32_t u32ClientId = pInfo->u32ClientID;
1639 unsigned i;
1640 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1641 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1642 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1643 if (pSession->aHGCMClientIds[i] == u32ClientId)
1644 {
1645 pSession->aHGCMClientIds[i] = UINT32_MAX;
1646 break;
1647 }
1648 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1649 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1650 {
1651 static unsigned s_cErrors = 0;
1652 if (s_cErrors++ > 32)
1653 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1654 return VERR_INVALID_HANDLE;
1655 }
1656
1657 /*
1658 * The VbglHGCMConnect call will invoke the callback if the HGCM
1659 * call is performed in an ASYNC fashion. The function is not able
1660 * to deal with cancelled requests.
1661 */
1662 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1663 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1664 if (RT_SUCCESS(rc))
1665 {
1666 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1667 if (pcbDataReturned)
1668 *pcbDataReturned = sizeof(*pInfo);
1669 }
1670
1671 /* Update the client id array according to the result. */
1672 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1673 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1674 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1675 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1676
1677 return rc;
1678}
1679
1680
1681static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1682 PVBOXGUESTSESSION pSession,
1683 VBoxGuestHGCMCallInfo *pInfo,
1684 uint32_t cMillies, bool fInterruptible, bool f32bit,
1685 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1686{
1687 const uint32_t u32ClientId = pInfo->u32ClientID;
1688 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1689 uint32_t fFlags;
1690 size_t cbActual;
1691 unsigned i;
1692 int rc;
1693
1694 /*
1695 * Some more validations.
1696 */
1697 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1698 {
1699 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1700 return VERR_INVALID_PARAMETER;
1701 }
1702
1703 cbActual = cbExtra + sizeof(*pInfo);
1704#ifdef RT_ARCH_AMD64
1705 if (f32bit)
1706 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1707 else
1708#endif
1709 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1710 if (cbData < cbActual)
1711 {
1712 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1713 cbData, cbActual));
1714 return VERR_INVALID_PARAMETER;
1715 }
1716
1717 /*
1718 * Validate the client id.
1719 */
1720 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1721 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1722 if (pSession->aHGCMClientIds[i] == u32ClientId)
1723 break;
1724 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1725 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1726 {
1727 static unsigned s_cErrors = 0;
1728 if (s_cErrors++ > 32)
1729 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1730 return VERR_INVALID_HANDLE;
1731 }
1732
1733 /*
1734 * The VbglHGCMCall call will invoke the callback if the HGCM
1735 * call is performed in an ASYNC fashion. This function can
1736 * deal with cancelled requests, so we let user more requests
1737 * be interruptible (should add a flag for this later I guess).
1738 */
1739 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1740 fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1741#ifdef RT_ARCH_AMD64
1742 if (f32bit)
1743 {
1744 if (fInterruptible)
1745 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1746 else
1747 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1748 }
1749 else
1750#endif
1751 {
1752 if (fInterruptible)
1753 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1754 else
1755 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1756 }
1757 if (RT_SUCCESS(rc))
1758 {
1759 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1760 if (pcbDataReturned)
1761 *pcbDataReturned = cbActual;
1762 }
1763 else
1764 {
1765 if ( rc != VERR_INTERRUPTED
1766 && rc != VERR_TIMEOUT)
1767 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1768 else
1769 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1770 }
1771 return rc;
1772}
1773
1774
1775/**
1776 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1777 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1778 *
1779 * @param pDevExt The device extension.
1780 * @param pu32ClientId The client id.
1781 * @param pcbDataReturned Where to store the amount of returned data. Can
1782 * be NULL.
1783 */
1784static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1785{
1786 int rc;
1787 VBoxGuestHGCMConnectInfo CnInfo;
1788
1789 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1790
1791 /*
1792 * If there is an old client, try disconnect it first.
1793 */
1794 if (pDevExt->u32ClipboardClientId != 0)
1795 {
1796 VBoxGuestHGCMDisconnectInfo DiInfo;
1797 DiInfo.result = VERR_WRONG_ORDER;
1798 DiInfo.u32ClientID = pDevExt->u32ClipboardClientId;
1799 rc = VbglR0HGCMInternalDisconnect(&DiInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1800 if (RT_SUCCESS(rc))
1801 {
1802 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1803 return rc;
1804 }
1805 if (RT_FAILURE((int32_t)DiInfo.result))
1806 {
1807 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. DiInfo.result=%Rrc\n", DiInfo.result));
1808 return DiInfo.result;
1809 }
1810 pDevExt->u32ClipboardClientId = 0;
1811 }
1812
1813 /*
1814 * Try connect.
1815 */
1816 CnInfo.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1817 strcpy(CnInfo.Loc.u.host.achName, "VBoxSharedClipboard");
1818 CnInfo.u32ClientID = 0;
1819 CnInfo.result = VERR_WRONG_ORDER;
1820
1821 rc = VbglR0HGCMInternalConnect(&CnInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1822 if (RT_FAILURE(rc))
1823 {
1824 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1825 return rc;
1826 }
1827 if (RT_FAILURE(CnInfo.result))
1828 {
1829 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1830 return rc;
1831 }
1832
1833 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", CnInfo.u32ClientID));
1834
1835 pDevExt->u32ClipboardClientId = CnInfo.u32ClientID;
1836 *pu32ClientId = CnInfo.u32ClientID;
1837 if (pcbDataReturned)
1838 *pcbDataReturned = sizeof(uint32_t);
1839
1840 return VINF_SUCCESS;
1841}
1842
1843#endif /* VBOX_WITH_HGCM */
1844
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly. If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pInfo           The output buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can
 *                          be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    /* All balloon state is protected by the balloon mutex. */
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The chunk count reported by the host must stay constant once known. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3    = false;

                /* Try adjust the balloon in ring-0; fHandleInR3 is set if user
                   land must supply the memory instead. */
                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
1920
1921
1922/**
1923 * Handle a request for changing the memory balloon.
1924 *
1925 * @returns VBox status code.
1926 *
1927 * @param pDevExt The device extention.
1928 * @param pSession The session.
1929 * @param pInfo The change request structure (input).
1930 * @param pcbDataReturned Where to store the amount of returned data. Can
1931 * be NULL.
1932 */
1933static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1934 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1935{
1936 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1937 AssertRCReturn(rc, rc);
1938
1939 if (!pDevExt->MemBalloon.fUseKernelAPI)
1940 {
1941 /*
1942 * The first user trying to query/change the balloon becomes the
1943 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1944 */
1945 if ( pDevExt->MemBalloon.pOwner != pSession
1946 && pDevExt->MemBalloon.pOwner == NULL)
1947 pDevExt->MemBalloon.pOwner = pSession;
1948
1949 if (pDevExt->MemBalloon.pOwner == pSession)
1950 {
1951 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1952 if (pcbDataReturned)
1953 *pcbDataReturned = 0;
1954 }
1955 else
1956 rc = VERR_PERMISSION_DENIED;
1957 }
1958 else
1959 rc = VERR_PERMISSION_DENIED;
1960
1961 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1962 return rc;
1963}
1964
1965
1966/**
1967 * Guest backdoor logging.
1968 *
1969 * @returns VBox status code.
1970 *
1971 * @param pch The log message (need not be NULL terminated).
1972 * @param cbData Size of the buffer.
1973 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1974 */
1975static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1976{
1977 NOREF(pch);
1978 NOREF(cbData);
1979 Log(("%.*s", cbData, pch));
1980 if (pcbDataReturned)
1981 *pcbDataReturned = 0;
1982 return VINF_SUCCESS;
1983}
1984
1985
1986/**
1987 * Common IOCtl for user to kernel and kernel to kernel communcation.
1988 *
1989 * This function only does the basic validation and then invokes
1990 * worker functions that takes care of each specific function.
1991 *
1992 * @returns VBox status code.
1993 *
1994 * @param iFunction The requested function.
1995 * @param pDevExt The device extension.
1996 * @param pSession The client session.
1997 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1998 * @param cbData The max size of the data buffer.
1999 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2000 */
2001int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2002 void *pvData, size_t cbData, size_t *pcbDataReturned)
2003{
2004 int rc;
2005 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2006 iFunction, pDevExt, pSession, pvData, cbData));
2007
2008 /*
2009 * Make sure the returned data size is set to zero.
2010 */
2011 if (pcbDataReturned)
2012 *pcbDataReturned = 0;
2013
2014 /*
2015 * Define some helper macros to simplify validation.
2016 */
2017#define CHECKRET_RING0(mnemonic) \
2018 do { \
2019 if (pSession->R0Process != NIL_RTR0PROCESS) \
2020 { \
2021 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2022 pSession->Process, (uintptr_t)pSession->R0Process)); \
2023 return VERR_PERMISSION_DENIED; \
2024 } \
2025 } while (0)
2026#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2027 do { \
2028 if (cbData < (cbMin)) \
2029 { \
2030 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2031 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2032 return VERR_BUFFER_OVERFLOW; \
2033 } \
2034 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2035 { \
2036 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2037 return VERR_INVALID_POINTER; \
2038 } \
2039 } while (0)
2040
2041
2042 /*
2043 * Deal with variably sized requests first.
2044 */
2045 rc = VINF_SUCCESS;
2046 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2047 {
2048 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2049 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2050 }
2051#ifdef VBOX_WITH_HGCM
2052 /*
2053 * These ones are a bit tricky.
2054 */
2055 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2056 {
2057 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2058 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2059 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2060 fInterruptible, false /*f32bit*/,
2061 0, cbData, pcbDataReturned);
2062 }
2063 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2064 {
2065 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2066 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2067 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2068 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2069 false /*f32bit*/,
2070 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2071 }
2072# ifdef RT_ARCH_AMD64
2073 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2074 {
2075 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2076 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2077 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2078 fInterruptible, true /*f32bit*/,
2079 0, cbData, pcbDataReturned);
2080 }
2081 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2082 {
2083 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2084 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2085 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2086 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2087 true /*f32bit*/,
2088 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2089 }
2090# endif
2091#endif /* VBOX_WITH_HGCM */
2092 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2093 {
2094 CHECKRET_MIN_SIZE("LOG", 1);
2095 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2096 }
2097 else
2098 {
2099 switch (iFunction)
2100 {
2101 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2102 CHECKRET_RING0("GETVMMDEVPORT");
2103 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2104 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2105 break;
2106
2107 case VBOXGUEST_IOCTL_WAITEVENT:
2108 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2109 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2110 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2111 break;
2112
2113 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2114 if (cbData != 0)
2115 rc = VERR_INVALID_PARAMETER;
2116 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2117 break;
2118
2119 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2120 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2121 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2122 break;
2123
2124#ifdef VBOX_WITH_HGCM
2125 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2126# ifdef RT_ARCH_AMD64
2127 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2128# endif
2129 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2130 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2131 break;
2132
2133 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2134# ifdef RT_ARCH_AMD64
2135 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2136# endif
2137 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2138 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2139 break;
2140
2141 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2142 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2143 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2144 break;
2145#endif /* VBOX_WITH_HGCM */
2146
2147 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2148 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2149 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2150 break;
2151
2152 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2153 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2154 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2155 break;
2156
2157 default:
2158 {
2159 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2160 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2161 rc = VERR_NOT_SUPPORTED;
2162 break;
2163 }
2164 }
2165 }
2166
2167 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2168 return rc;
2169}
2170
2171
2172
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up threads waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;                     /* accumulates RTSemEventMultiSignal status; asserted 0 at the end */
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them. This works
     *       fine as long as we're never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take other spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events    = 0;
        ASMCompilerBarrier();
        /* Submit the pre-allocated ack request directly via the VMMDev I/O port. */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t        fEvents = pReq->events;
            PVBOXGUESTWAIT  pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                /* Wake every waiter whose HGCM request the host has marked done. */
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            /* Merge in events still pending from earlier interrupts. */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    /* Each event bit is consumed by the first matching waiter (FIFO). */
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            /* Events nobody consumed stay pending for future waiters. */
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implement that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
2294
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette