VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 32274

Last change on this file since 32274 was 32274, checked in by vboxsync, 15 years ago

Guest Additions/common: Return VERR_NOT_IMPLEMENTED in VBoxGuestCommonIOCtl_*VRDPSession.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 83.3 KB
Line 
1/* $Id: VBoxGuest.cpp 32274 2010-09-07 11:19:18Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include "VBoxGuest2.h"
25#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
26#include <VBox/log.h>
27#include <iprt/mem.h>
28#include <iprt/time.h>
29#include <iprt/memobj.h>
30#include <iprt/asm.h>
31#include <iprt/asm-amd64-x86.h>
32#include <iprt/string.h>
33#include <iprt/process.h>
34#include <iprt/assert.h>
35#include <iprt/param.h>
36#ifdef VBOX_WITH_HGCM
37# include <iprt/thread.h>
38#endif
39#include "version-generated.h"
40#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
41# include "revision-generated.h"
42#endif
43#if defined(RT_OS_WINDOWS)
44RT_C_DECLS_BEGIN
45# include <ntddk.h>
46RT_C_DECLS_END
47#endif
48
49
50/*******************************************************************************
51* Internal Functions *
52*******************************************************************************/
53#ifdef VBOX_WITH_HGCM
54static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
55#endif
56
57
58/*******************************************************************************
59* Global Variables *
60*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request, including the trailing array of
 *  physical page addresses covering exactly one balloon chunk. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
62
63
64
65/**
66 * Reserves memory in which the VMM can relocate any guest mappings
67 * that are floating around.
68 *
69 * This operation is a little bit tricky since the VMM might not accept
70 * just any address because of address clashes between the three contexts
71 * it operates in, so use a small stack to perform this operation.
72 *
73 * @returns VBox status code (ignored).
74 * @param pDevExt The device extension.
75 */
76static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
77{
78 /*
79 * Query the required space.
80 */
81 VMMDevReqHypervisorInfo *pReq;
82 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
83 if (RT_FAILURE(rc))
84 return rc;
85 pReq->hypervisorStart = 0;
86 pReq->hypervisorSize = 0;
87 rc = VbglGRPerform(&pReq->header);
88 if (RT_FAILURE(rc)) /* this shouldn't happen! */
89 {
90 VbglGRFree(&pReq->header);
91 return rc;
92 }
93
94 /*
95 * The VMM will report back if there is nothing it wants to map, like for
96 * insance in VT-x and AMD-V mode.
97 */
98 if (pReq->hypervisorSize == 0)
99 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
100 else
101 {
102 /*
103 * We have to try several times since the host can be picky
104 * about certain addresses.
105 */
106 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
107 uint32_t cbHypervisor = pReq->hypervisorSize;
108 RTR0MEMOBJ ahTries[5];
109 uint32_t iTry;
110 bool fBitched = false;
111 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
112 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
113 {
114 /*
115 * Reserve space, or if that isn't supported, create a object for
116 * some fictive physical memory and map that in to kernel space.
117 *
118 * To make the code a bit uglier, most systems cannot help with
119 * 4MB alignment, so we have to deal with that in addition to
120 * having two ways of getting the memory.
121 */
122 uint32_t uAlignment = _4M;
123 RTR0MEMOBJ hObj;
124 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
125 if (rc == VERR_NOT_SUPPORTED)
126 {
127 uAlignment = PAGE_SIZE;
128 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
129 }
130 if (rc == VERR_NOT_SUPPORTED)
131 {
132 if (hFictive == NIL_RTR0MEMOBJ)
133 {
134 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
135 if (RT_FAILURE(rc))
136 break;
137 hFictive = hObj;
138 }
139 uAlignment = _4M;
140 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
141 if (rc == VERR_NOT_SUPPORTED)
142 {
143 uAlignment = PAGE_SIZE;
144 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
145 }
146 }
147 if (RT_FAILURE(rc))
148 {
149 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
150 rc, cbHypervisor, uAlignment, iTry));
151 fBitched = true;
152 break;
153 }
154
155 /*
156 * Try set it.
157 */
158 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
159 pReq->header.rc = VERR_INTERNAL_ERROR;
160 pReq->hypervisorSize = cbHypervisor;
161 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
162 if ( uAlignment == PAGE_SIZE
163 && pReq->hypervisorStart & (_4M - 1))
164 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
165 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
166
167 rc = VbglGRPerform(&pReq->header);
168 if (RT_SUCCESS(rc))
169 {
170 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
171 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
172 RTR0MemObjAddress(pDevExt->hGuestMappings),
173 RTR0MemObjSize(pDevExt->hGuestMappings),
174 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
175 break;
176 }
177 ahTries[iTry] = hObj;
178 }
179
180 /*
181 * Cleanup failed attempts.
182 */
183 while (iTry-- > 0)
184 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
185 if ( RT_FAILURE(rc)
186 && hFictive != NIL_RTR0PTR)
187 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
188 if (RT_FAILURE(rc) && !fBitched)
189 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
190 }
191 VbglGRFree(&pReq->header);
192
193 /*
194 * We ignore failed attempts for now.
195 */
196 return VINF_SUCCESS;
197}
198
199
200/**
201 * Undo what vboxGuestInitFixateGuestMappings did.
202 *
203 * @param pDevExt The device extension.
204 */
205static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
206{
207 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
208 {
209 /*
210 * Tell the host that we're going to free the memory we reserved for
211 * it, the free it up. (Leak the memory if anything goes wrong here.)
212 */
213 VMMDevReqHypervisorInfo *pReq;
214 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
215 if (RT_SUCCESS(rc))
216 {
217 pReq->hypervisorStart = 0;
218 pReq->hypervisorSize = 0;
219 rc = VbglGRPerform(&pReq->header);
220 VbglGRFree(&pReq->header);
221 }
222 if (RT_SUCCESS(rc))
223 {
224 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
225 AssertRC(rc);
226 }
227 else
228 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
229
230 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
231 }
232}
233
234
235/**
236 * Sets the interrupt filter mask during initialization and termination.
237 *
238 * This will ASSUME that we're the ones in carge over the mask, so
239 * we'll simply clear all bits we don't set.
240 *
241 * @returns VBox status code (ignored).
242 * @param pDevExt The device extension.
243 * @param fMask The new mask.
244 */
245static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
246{
247 VMMDevCtlGuestFilterMask *pReq;
248 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
249 if (RT_SUCCESS(rc))
250 {
251 pReq->u32OrMask = fMask;
252 pReq->u32NotMask = ~fMask;
253 rc = VbglGRPerform(&pReq->header);
254 if (RT_FAILURE(rc))
255 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
256 VbglGRFree(&pReq->header);
257 }
258 return rc;
259}
260
261
262/**
263 * Inflate the balloon by one chunk represented by an R0 memory object.
264 *
265 * The caller owns the balloon mutex.
266 *
267 * @returns IPRT status code.
268 * @param pMemObj Pointer to the R0 memory object.
269 * @param pReq The pre-allocated request for performing the VMMDev call.
270 */
271static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
272{
273 uint32_t iPage;
274 int rc;
275
276 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
277 {
278 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
279 pReq->aPhysPage[iPage] = phys;
280 }
281
282 pReq->fInflate = true;
283 pReq->header.size = cbChangeMemBalloonReq;
284 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
285
286 rc = VbglGRPerform(&pReq->header);
287 if (RT_FAILURE(rc))
288 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
289 return rc;
290}
291
292
293/**
294 * Deflate the balloon by one chunk - info the host and free the memory object.
295 *
296 * The caller owns the balloon mutex.
297 *
298 * @returns IPRT status code.
299 * @param pMemObj Pointer to the R0 memory object.
300 * The memory object will be freed afterwards.
301 * @param pReq The pre-allocated request for performing the VMMDev call.
302 */
303static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
304{
305 uint32_t iPage;
306 int rc;
307
308 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
309 {
310 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
311 pReq->aPhysPage[iPage] = phys;
312 }
313
314 pReq->fInflate = false;
315 pReq->header.size = cbChangeMemBalloonReq;
316 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
317
318 rc = VbglGRPerform(&pReq->header);
319 if (RT_FAILURE(rc))
320 {
321 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
322 return rc;
323 }
324
325 rc = RTR0MemObjFree(*pMemObj, true);
326 if (RT_FAILURE(rc))
327 {
328 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
329 return rc;
330 }
331
332 *pMemObj = NIL_RTR0MEMOBJ;
333 return VINF_SUCCESS;
334}
335
336
337/**
338 * Inflate/deflate the memory balloon and notify the host.
339 *
340 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
341 * the mutex.
342 *
343 * @returns VBox status code.
344 * @param pDevExt The device extension.
345 * @param pSession The session.
346 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
347 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
348 * (VINF_SUCCESS if set).
349 */
350static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
351{
352 int rc = VINF_SUCCESS;
353
354 if (pDevExt->MemBalloon.fUseKernelAPI)
355 {
356 VMMDevChangeMemBalloon *pReq;
357 uint32_t i;
358
359 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
360 {
361 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
362 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
363 return VERR_INVALID_PARAMETER;
364 }
365
366 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
367 return VINF_SUCCESS; /* nothing to do */
368
369 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
370 && !pDevExt->MemBalloon.paMemObj)
371 {
372 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
373 if (!pDevExt->MemBalloon.paMemObj)
374 {
375 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
376 return VERR_NO_MEMORY;
377 }
378 }
379
380 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
381 if (RT_FAILURE(rc))
382 return rc;
383
384 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
385 {
386 /* inflate */
387 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
388 {
389 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
390 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
391 if (RT_FAILURE(rc))
392 {
393 if (rc == VERR_NOT_SUPPORTED)
394 {
395 /* not supported -- fall back to the R3-allocated memory. */
396 rc = VINF_SUCCESS;
397 pDevExt->MemBalloon.fUseKernelAPI = false;
398 Assert(pDevExt->MemBalloon.cChunks == 0);
399 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
400 }
401 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
402 * cannot allocate more memory => don't try further, just stop here */
403 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
404 break;
405 }
406
407 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
408 if (RT_FAILURE(rc))
409 {
410 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
411 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
412 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
413 break;
414 }
415 pDevExt->MemBalloon.cChunks++;
416 }
417 }
418 else
419 {
420 /* deflate */
421 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
422 {
423 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
424 if (RT_FAILURE(rc))
425 {
426 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
427 break;
428 }
429 pDevExt->MemBalloon.cChunks--;
430 }
431 }
432
433 VbglGRFree(&pReq->header);
434 }
435
436 /*
437 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
438 * the balloon changes via the other API.
439 */
440 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
441
442 return rc;
443}
444
445
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - the caller takes the
 * balloon mutex (NOTE(review): assumed from the naming convention used by the
 * other workers here -- confirm against the IOCtl caller).
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject inflation when the balloon is full or the max was never queried. */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the chunk tracking array on first inflation. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Cannot deflate an empty balloon. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflation this also remembers the first free slot; for deflation it
     * locates the slot that holds the chunk being returned.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the user-mode chunk so its physical pages can be handed to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
562
563
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a driver unload, pSession == NULL) may tear
       down the balloon. */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate chunk by chunk, from the last allocated downwards. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc.  Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc).  Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
611
612
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     *
     * Note the strict unwind order below: each failure path releases exactly
     * what was acquired before it.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = VBoxGuestReportGuestInfo(enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);

                        rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
                        if (RT_FAILURE(rc))
                            LogRel(("VBoxGuestInitDevExt: VBoxReportGuestDriverStatus failed, rc=%Rrc\n", rc));

                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: VBoxReportGuestInfo failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
777
778
/**
 * Deletes all the items in a wait chain.
 *
 * @param   pList       The list whose entries should be destroyed and freed.
 */
static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
{
    while (pList->pHead)
    {
        int             rc2;
        PVBOXGUESTWAIT  pWait = pList->pHead;
        pList->pHead = pWait->pNext;

        /* Unhook, destroy the event semaphore and free the entry. */
        pWait->pNext = NULL;
        pWait->pPrev = NULL;
        rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
        pWait->Event = NIL_RTSEMEVENTMULTI;
        pWait->pSession = NULL;
        RTMemFree(pWait);
    }
    pList->pHead = NULL;
    pList->pTail = NULL;
}
801
802
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
845
846
847/**
848 * Creates a VBoxGuest user session.
849 *
850 * The native code calls this when a ring-3 client opens the device.
851 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
852 *
853 * @returns VBox status code.
854 * @param pDevExt The device extension.
855 * @param ppSession Where to store the session on success.
856 */
857int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
858{
859 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
860 if (RT_UNLIKELY(!pSession))
861 {
862 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
863 return VERR_NO_MEMORY;
864 }
865
866 pSession->Process = RTProcSelf();
867 pSession->R0Process = RTR0ProcHandleSelf();
868 pSession->pDevExt = pDevExt;
869
870 *ppSession = pSession;
871 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
872 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Creates a VBoxGuest kernel session.
879 *
880 * The native code calls this when a ring-0 client connects to the device.
881 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
882 *
883 * @returns VBox status code.
884 * @param pDevExt The device extension.
885 * @param ppSession Where to store the session on success.
886 */
887int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
888{
889 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
890 if (RT_UNLIKELY(!pSession))
891 {
892 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
893 return VERR_NO_MEMORY;
894 }
895
896 pSession->Process = NIL_RTPROCESS;
897 pSession->R0Process = NIL_RTR0PROCESS;
898 pSession->pDevExt = pDevExt;
899
900 *ppSession = pSession;
901 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
902 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
903 return VINF_SUCCESS;
904}
905
906
907
/**
 * Closes a VBoxGuest session.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM client this session still has open. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    /* Clear the session before freeing it so stale pointers are obvious. */
    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
939
940
941/**
942 * Links the wait-for-event entry into the tail of the given list.
943 *
944 * @param pList The list to link it into.
945 * @param pWait The wait for event entry to append.
946 */
947DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
948{
949 const PVBOXGUESTWAIT pTail = pList->pTail;
950 pWait->pNext = NULL;
951 pWait->pPrev = pTail;
952 if (pTail)
953 pTail->pNext = pWait;
954 else
955 pList->pHead = pWait;
956 pList->pTail = pWait;
957}
958
959
960/**
961 * Unlinks the wait-for-event entry.
962 *
963 * @param pList The list to unlink it from.
964 * @param pWait The wait for event entry to unlink.
965 */
966DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
967{
968 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
969 const PVBOXGUESTWAIT pNext = pWait->pNext;
970 if (pNext)
971 pNext->pPrev = pPrev;
972 else
973 pList->pTail = pPrev;
974 if (pPrev)
975 pPrev->pNext = pNext;
976 else
977 pList->pHead = pNext;
978}
979
980
/**
 * Allocates a wait-for-event entry.
 *
 * Recycles an entry from the free list when possible, otherwise allocates a
 * fresh one together with its event semaphore.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt     The device extension.
 * @param   pSession    The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek at the free list first; re-checked under the spinlock below. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Rate-limit the out-of-memory complaints to the first 32 occurrences. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution.
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1042
1043
1044/**
1045 * Frees the wait-for-event entry.
1046 * The caller must own the wait spinlock!
1047 *
1048 * @param pDevExt The device extension.
1049 * @param pWait The wait-for-event entry to free.
1050 */
1051static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1052{
1053 pWait->fReqEvents = 0;
1054 pWait->fResEvents = 0;
1055#ifdef VBOX_WITH_HGCM
1056 pWait->pHGCMReq = NULL;
1057#endif
1058 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1059}
1060
1061
1062/**
1063 * Frees the wait-for-event entry.
1064 *
1065 * @param pDevExt The device extension.
1066 * @param pWait The wait-for-event entry to free.
1067 */
1068static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1069{
1070 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1071 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1072 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1073 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1074}
1075
1076
1077/**
1078 * Modifies the guest capabilities.
1079 *
1080 * Should be called during driver init and termination.
1081 *
1082 * @returns VBox status code.
1083 * @param fOr The Or mask (what to enable).
1084 * @param fNot The Not mask (what to disable).
1085 */
1086int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1087{
1088 VMMDevReqGuestCapabilities2 *pReq;
1089 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1090 if (RT_FAILURE(rc))
1091 {
1092 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1093 sizeof(*pReq), sizeof(*pReq), rc));
1094 return rc;
1095 }
1096
1097 pReq->u32OrMask = fOr;
1098 pReq->u32NotMask = fNot;
1099
1100 rc = VbglGRPerform(&pReq->header);
1101 if (RT_FAILURE(rc))
1102 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1103
1104 VbglGRFree(&pReq->header);
1105 return rc;
1106}
1107
1108
1109/**
1110 * Implements the fast (no input or output) type of IOCtls.
1111 *
1112 * This is currently just a placeholder stub inherited from the support driver code.
1113 *
1114 * @returns VBox status code.
1115 * @param iFunction The IOCtl function number.
1116 * @param pDevExt The device extension.
1117 * @param pSession The session.
1118 */
1119int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1120{
1121 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1122
1123 NOREF(iFunction);
1124 NOREF(pDevExt);
1125 NOREF(pSession);
1126 return VERR_NOT_SUPPORTED;
1127}
1128
1129
1130/**
1131 * Return the VMM device port.
1132 *
1133 * returns IPRT status code.
1134 * @param pDevExt The device extension.
1135 * @param pInfo The request info.
1136 * @param pcbDataReturned (out) contains the number of bytes to return.
1137 */
1138static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1139{
1140 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1141 pInfo->portAddress = pDevExt->IOPortBase;
1142 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1143 if (pcbDataReturned)
1144 *pcbDataReturned = sizeof(*pInfo);
1145 return VINF_SUCCESS;
1146}
1147
1148
/**
 * Worker VBoxGuestCommonIOCtl_WaitEvent.
 * The caller enters the spinlock, we may or may not leave it.
 *
 * Checks whether any of the requested events are already pending; if so,
 * consumes them, fills in the result structure and releases the spinlock.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 *          VERR_TIMEOUT if nothing matched; the caller still owns the spinlock.
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait-for-event request/result structure.
 * @param   iEvent      Index of the (lowest) requested event bit, for logging.
 * @param   fReqEvents  Mask of the events the caller is waiting for.
 * @param   pTmp        The caller's spinlock temporary.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matching events and drop the lock before touching
           pInfo / logging. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Single-bit requests get the more compact "mask/index" log form. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
1174
1175
/**
 * Worker for the WAITEVENT ioctl: waits until at least one of the events in
 * pInfo->u32EventMaskIn is pending, or until the timeout expires
 * (pInfo->u32TimeoutIn in milliseconds, UINT32_MAX meaning indefinite).
 *
 * @returns VBox status code: VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_INVALID_PARAMETER, VERR_NO_MEMORY or VERR_SEM_DESTROYED.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The request/result structure.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    uint32_t fResEvents;
    int iEvent;
    PVBOXGUESTWAIT pWait;
    int rc;

    /* Initialize the output up front so every exit path returns sane data. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    iEvent = ASMBitFirstSetU32(fReqEvents) - 1;     /* -1 when no bit is set at all */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;  /* the worker already released the spinlock on success */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* Zero timeout means poll-only; we just polled, so report timeout. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    fResEvents = pWait->fResEvents;     /* snapshot before the entry is recycled */
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     * Note: fResEvents == UINT32_MAX marks a wait cancelled via
     *       CANCEL_ALL_WAITEVENTS (see VBoxGuestCommonIOCtl_CancelAllWaitEvents).
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Waking up successfully without any events is unexpected;
               log the first few occurrences. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1302
1303
1304static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1305{
1306 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1307#if defined(RT_OS_SOLARIS)
1308 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1309#endif
1310 PVBOXGUESTWAIT pWait;
1311 int rc = 0;
1312
1313 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1314
1315 /*
1316 * Walk the event list and wake up anyone with a matching session.
1317 *
1318 * Note! On Solaris we have to do really ugly stuff here because
1319 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1320 * The hack is racy, but what we can we do... (Eliminate this
1321 * termination hack, perhaps?)
1322 */
1323#if defined(RT_OS_SOLARIS)
1324 RTThreadPreemptDisable(&State);
1325 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1326 do
1327 {
1328 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1329 if ( pWait->pSession == pSession
1330 && pWait->fResEvents != UINT32_MAX)
1331 {
1332 RTSEMEVENTMULTI hEvent = pWait->Event;
1333 pWait->fResEvents = UINT32_MAX;
1334 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1335 /* HACK ALRET! This races wakeup + reuse! */
1336 rc |= RTSemEventMultiSignal(hEvent);
1337 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1338 break;
1339 }
1340 } while (pWait);
1341 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1342 RTThreadPreemptDisable(&State);
1343#else
1344 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1345 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1346 if (pWait->pSession == pSession)
1347 {
1348 pWait->fResEvents = UINT32_MAX;
1349 rc |= RTSemEventMultiSignal(pWait->Event);
1350 }
1351 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1352#endif
1353 Assert(rc == 0);
1354
1355 return VINF_SUCCESS;
1356}
1357
1358
/**
 * Worker for the VMMREQUEST ioctl: forwards a raw VMMDev request from user
 * land to the host.
 *
 * The request is copied onto the physical memory heap so the VBoxGuestLib can
 * hand its physical address to the VMMDev; the (possibly updated) request is
 * then copied back into the caller's buffer.
 *
 * @returns VBox status code; the VMMDev status when the host-side execution fails.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request (input/output buffer).
 * @param   cbData          Size of the caller's buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    int rc;
    VMMDevRequestHeader *pReqCopy;

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);

    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /* The declared size must at least cover the structure for this request type... */
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    /* ... and must not claim more than the caller actually supplied. */
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the host's reply back into the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the VMMDev rejected the request; propagate its status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1435
1436
1437static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1438{
1439 VMMDevCtlGuestFilterMask *pReq;
1440 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1441 if (RT_FAILURE(rc))
1442 {
1443 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1444 sizeof(*pReq), sizeof(*pReq), rc));
1445 return rc;
1446 }
1447
1448 pReq->u32OrMask = pInfo->u32OrMask;
1449 pReq->u32NotMask = pInfo->u32NotMask;
1450 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1451 rc = VbglGRPerform(&pReq->header);
1452 if (RT_FAILURE(rc))
1453 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1454
1455 VbglGRFree(&pReq->header);
1456 return rc;
1457}
1458
1459#ifdef VBOX_WITH_HGCM
1460
AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* The HGCM callbacks below pass RT_INDEFINITE_WAIT around via a uint32_t (u32User/cMillies). */
1462
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks until the host marks the HGCM request done (VBOX_HGCM_REQ_DONE in
 * pHdr->fu32Flags, signalled via the HGCM wait list) or the wait fails.
 *
 * @returns VINF_SUCCESS when the request completed, otherwise the wait status
 *          (VERR_TIMEOUT, VERR_INTERRUPTED, VERR_SEM_DESTROYED, ...).
 * @param   pHdr            The HGCM request header (shared with the host, hence volatile).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Wait timeout in milliseconds (RT_INDEFINITE_WAIT ok).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{
    int rc;

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);   /* back off briefly and retry the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc;  /* driver unload - bail without touching freed structures */

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1535
1536
1537/**
1538 * This is a callback for dealing with async waits.
1539 *
1540 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1541 */
1542static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1543{
1544 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1545 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1546 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1547 pDevExt,
1548 false /* fInterruptible */,
1549 u32User /* cMillies */);
1550}
1551
1552
1553/**
1554 * This is a callback for dealing with async waits with a timeout.
1555 *
1556 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1557 */
1558static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1559 void *pvUser, uint32_t u32User)
1560{
1561 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1562 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1563 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1564 pDevExt,
1565 true /* fInterruptible */,
1566 u32User /* cMillies */ );
1567
1568}
1569
1570
1571/**
1572 * Helper to (re-)init the HGCM communication.
1573 *
1574 * @param pDevExt Device extension
1575 */
1576int VBoxGuestHGCMInitCommunication(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
1577{
1578 int rc = VBoxGuestReportGuestInfo(enmOSType);
1579 if (RT_SUCCESS(rc))
1580 {
1581 rc = VBoxGuestReportDriverStatus(true /* Driver is active */);
1582 if (RT_FAILURE(rc))
1583 Log(("VBoxGuest::VBoxGuestInitHGCMCommunication: could not report guest driver status, vrc = %d\n", rc));
1584 }
1585 else
1586 Log(("VBoxGuest::VBoxGuestInitHGCMCommunication: could not report guest information to host, vrc = %d\n", rc));
1587 Log(("VBoxGuest::VBoxGuestInitHGCMCommunication: returned with vrc = %d\n", rc));
1588 return rc;
1589}
1590
1591
/**
 * Worker for the HGCM_CONNECT ioctl: connects to an HGCM service on behalf
 * of the session and records the resulting client id in the session's table.
 *
 * @returns VBox status code; VERR_TOO_MANY_OPEN_FILES when the session's
 *          client id table is full (the new connection is rolled back).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connect request/result structure.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                            VBoxGuestHGCMConnectInfo *pInfo, size_t *pcbDataReturned)
{
    int rc;

    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])   /* zero marks a free slot */
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                static unsigned s_cErrors = 0;
                VBoxGuestHGCMDisconnectInfo Info;

                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                /* Roll back the connection we cannot track. */
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1646
1647
1648static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1649 size_t *pcbDataReturned)
1650{
1651 /*
1652 * Validate the client id and invalidate its entry while we're in the call.
1653 */
1654 int rc;
1655 const uint32_t u32ClientId = pInfo->u32ClientID;
1656 unsigned i;
1657 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1658 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1659 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1660 if (pSession->aHGCMClientIds[i] == u32ClientId)
1661 {
1662 pSession->aHGCMClientIds[i] = UINT32_MAX;
1663 break;
1664 }
1665 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1666 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1667 {
1668 static unsigned s_cErrors = 0;
1669 if (s_cErrors++ > 32)
1670 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1671 return VERR_INVALID_HANDLE;
1672 }
1673
1674 /*
1675 * The VbglHGCMConnect call will invoke the callback if the HGCM
1676 * call is performed in an ASYNC fashion. The function is not able
1677 * to deal with cancelled requests.
1678 */
1679 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1680 rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1681 if (RT_SUCCESS(rc))
1682 {
1683 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1684 if (pcbDataReturned)
1685 *pcbDataReturned = sizeof(*pInfo);
1686 }
1687
1688 /* Update the client id array according to the result. */
1689 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1690 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1691 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1692 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1693
1694 return rc;
1695}
1696
1697
1698static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1699 PVBOXGUESTSESSION pSession,
1700 VBoxGuestHGCMCallInfo *pInfo,
1701 uint32_t cMillies, bool fInterruptible, bool f32bit,
1702 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1703{
1704 const uint32_t u32ClientId = pInfo->u32ClientID;
1705 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1706 uint32_t fFlags;
1707 size_t cbActual;
1708 unsigned i;
1709 int rc;
1710
1711 /*
1712 * Some more validations.
1713 */
1714 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1715 {
1716 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1717 return VERR_INVALID_PARAMETER;
1718 }
1719
1720 cbActual = cbExtra + sizeof(*pInfo);
1721#ifdef RT_ARCH_AMD64
1722 if (f32bit)
1723 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1724 else
1725#endif
1726 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1727 if (cbData < cbActual)
1728 {
1729 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1730 cbData, cbActual));
1731 return VERR_INVALID_PARAMETER;
1732 }
1733
1734 /*
1735 * Validate the client id.
1736 */
1737 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1738 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1739 if (pSession->aHGCMClientIds[i] == u32ClientId)
1740 break;
1741 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1742 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1743 {
1744 static unsigned s_cErrors = 0;
1745 if (s_cErrors++ > 32)
1746 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1747 return VERR_INVALID_HANDLE;
1748 }
1749
1750 /*
1751 * The VbglHGCMCall call will invoke the callback if the HGCM
1752 * call is performed in an ASYNC fashion. This function can
1753 * deal with cancelled requests, so we let user more requests
1754 * be interruptible (should add a flag for this later I guess).
1755 */
1756 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1757 fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1758#ifdef RT_ARCH_AMD64
1759 if (f32bit)
1760 {
1761 if (fInterruptible)
1762 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1763 else
1764 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1765 }
1766 else
1767#endif
1768 {
1769 if (fInterruptible)
1770 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1771 else
1772 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1773 }
1774 if (RT_SUCCESS(rc))
1775 {
1776 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1777 if (pcbDataReturned)
1778 *pcbDataReturned = cbActual;
1779 }
1780 else
1781 {
1782 if ( rc != VERR_INTERRUPTED
1783 && rc != VERR_TIMEOUT)
1784 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1785 else
1786 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1787 }
1788 return rc;
1789}
1790
1791
1792/**
1793 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1794 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1795 *
1796 * @param pDevExt The device extension.
1797 * @param pu32ClientId The client id.
1798 * @param pcbDataReturned Where to store the amount of returned data. Can
1799 * be NULL.
1800 */
1801static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1802{
1803 int rc;
1804 VBoxGuestHGCMConnectInfo CnInfo;
1805
1806 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1807
1808 /*
1809 * If there is an old client, try disconnect it first.
1810 */
1811 if (pDevExt->u32ClipboardClientId != 0)
1812 {
1813 VBoxGuestHGCMDisconnectInfo DiInfo;
1814 DiInfo.result = VERR_WRONG_ORDER;
1815 DiInfo.u32ClientID = pDevExt->u32ClipboardClientId;
1816 rc = VbglR0HGCMInternalDisconnect(&DiInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1817 if (RT_SUCCESS(rc))
1818 {
1819 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1820 return rc;
1821 }
1822 if (RT_FAILURE((int32_t)DiInfo.result))
1823 {
1824 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. DiInfo.result=%Rrc\n", DiInfo.result));
1825 return DiInfo.result;
1826 }
1827 pDevExt->u32ClipboardClientId = 0;
1828 }
1829
1830 /*
1831 * Try connect.
1832 */
1833 CnInfo.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1834 strcpy(CnInfo.Loc.u.host.achName, "VBoxSharedClipboard");
1835 CnInfo.u32ClientID = 0;
1836 CnInfo.result = VERR_WRONG_ORDER;
1837
1838 rc = VbglR0HGCMInternalConnect(&CnInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1839 if (RT_FAILURE(rc))
1840 {
1841 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1842 return rc;
1843 }
1844 if (RT_FAILURE(CnInfo.result))
1845 {
1846 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1847 return rc;
1848 }
1849
1850 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", CnInfo.u32ClientID));
1851
1852 pDevExt->u32ClipboardClientId = CnInfo.u32ClientID;
1853 *pu32ClientId = CnInfo.u32ClientID;
1854 if (pcbDataReturned)
1855 *pcbDataReturned = sizeof(uint32_t);
1856
1857 return VINF_SUCCESS;
1858}
1859
1860#endif /* VBOX_WITH_HGCM */
1861
/**
 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
 *
 * Ask the host for the size of the balloon and try to set it accordingly. If
 * this approach fails because it's not supported, return with fHandleInR3 set
 * and let the user land supply memory we can lock via the other ioctl.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pInfo           The output buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can
 *                          be NULL.
 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* The max chunk count reported by the host must stay stable
                   once it's been seen (0 means not yet initialized). */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3    = false;

                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        rc = VERR_PERMISSION_DENIED;    /* another session already owns the balloon */

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
1937
1938
1939/**
1940 * Handle a request for changing the memory balloon.
1941 *
1942 * @returns VBox status code.
1943 *
1944 * @param pDevExt The device extention.
1945 * @param pSession The session.
1946 * @param pInfo The change request structure (input).
1947 * @param pcbDataReturned Where to store the amount of returned data. Can
1948 * be NULL.
1949 */
1950static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1951 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1952{
1953 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1954 AssertRCReturn(rc, rc);
1955
1956 if (!pDevExt->MemBalloon.fUseKernelAPI)
1957 {
1958 /*
1959 * The first user trying to query/change the balloon becomes the
1960 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1961 */
1962 if ( pDevExt->MemBalloon.pOwner != pSession
1963 && pDevExt->MemBalloon.pOwner == NULL)
1964 pDevExt->MemBalloon.pOwner = pSession;
1965
1966 if (pDevExt->MemBalloon.pOwner == pSession)
1967 {
1968 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession,
1969 pInfo->u64ChunkAddr,
1970 pInfo->fInflate > 0 ? true : false);
1971 if (pcbDataReturned)
1972 *pcbDataReturned = 0;
1973 }
1974 else
1975 rc = VERR_PERMISSION_DENIED;
1976 }
1977 else
1978 rc = VERR_PERMISSION_DENIED;
1979
1980 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1981 return rc;
1982}
1983
1984
#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
/**
 * Enables the VRDP session and saves its session ID.
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_IMPLEMENTED since this is currently only supported on
 *          Windows (handled in the OS specific code there).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 */
static int VBoxGuestCommonIOCtl_EnableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Note: was declared taking VBOXGUESTDEVEXT by value, which mismatched the
       PVBOXGUESTDEVEXT the dispatcher passes and could not compile. */
    NOREF(pDevExt);
    NOREF(pSession);
    /* Nothing to do here right now, since this only is supported on Windows at the moment. */
    return VERR_NOT_IMPLEMENTED;
}


/**
 * Disables the VRDP session.
 *
 * @returns VBox status code.
 * @retval  VERR_NOT_IMPLEMENTED since this is currently only supported on
 *          Windows (handled in the OS specific code there).
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.
 */
static int VBoxGuestCommonIOCtl_DisableVRDPSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    NOREF(pDevExt);
    NOREF(pSession);
    /* Nothing to do here right now, since this only is supported on Windows at the moment. */
    return VERR_NOT_IMPLEMENTED;
}
#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2015
2016
2017/**
2018 * Guest backdoor logging.
2019 *
2020 * @returns VBox status code.
2021 *
2022 * @param pch The log message (need not be NULL terminated).
2023 * @param cbData Size of the buffer.
2024 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2025 */
2026static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2027{
2028 NOREF(pch);
2029 NOREF(cbData);
2030 Log(("%.*s", cbData, pch));
2031 if (pcbDataReturned)
2032 *pcbDataReturned = 0;
2033 return VINF_SUCCESS;
2034}
2035
2036
2037/**
2038 * Common IOCtl for user to kernel and kernel to kernel communcation.
2039 *
2040 * This function only does the basic validation and then invokes
2041 * worker functions that takes care of each specific function.
2042 *
2043 * @returns VBox status code.
2044 *
2045 * @param iFunction The requested function.
2046 * @param pDevExt The device extension.
2047 * @param pSession The client session.
2048 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2049 * @param cbData The max size of the data buffer.
2050 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2051 */
2052int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2053 void *pvData, size_t cbData, size_t *pcbDataReturned)
2054{
2055 int rc;
2056 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2057 iFunction, pDevExt, pSession, pvData, cbData));
2058
2059 /*
2060 * Make sure the returned data size is set to zero.
2061 */
2062 if (pcbDataReturned)
2063 *pcbDataReturned = 0;
2064
2065 /*
2066 * Define some helper macros to simplify validation.
2067 */
2068#define CHECKRET_RING0(mnemonic) \
2069 do { \
2070 if (pSession->R0Process != NIL_RTR0PROCESS) \
2071 { \
2072 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2073 pSession->Process, (uintptr_t)pSession->R0Process)); \
2074 return VERR_PERMISSION_DENIED; \
2075 } \
2076 } while (0)
2077#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2078 do { \
2079 if (cbData < (cbMin)) \
2080 { \
2081 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2082 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2083 return VERR_BUFFER_OVERFLOW; \
2084 } \
2085 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2086 { \
2087 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2088 return VERR_INVALID_POINTER; \
2089 } \
2090 } while (0)
2091
2092
2093 /*
2094 * Deal with variably sized requests first.
2095 */
2096 rc = VINF_SUCCESS;
2097 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2098 {
2099 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2100 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2101 }
2102#ifdef VBOX_WITH_HGCM
2103 /*
2104 * These ones are a bit tricky.
2105 */
2106 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2107 {
2108 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2109 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2110 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2111 fInterruptible, false /*f32bit*/,
2112 0, cbData, pcbDataReturned);
2113 }
2114 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2115 {
2116 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2117 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2118 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2119 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2120 false /*f32bit*/,
2121 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2122 }
2123# ifdef RT_ARCH_AMD64
2124 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2125 {
2126 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2127 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2128 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2129 fInterruptible, true /*f32bit*/,
2130 0, cbData, pcbDataReturned);
2131 }
2132 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2133 {
2134 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2135 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2136 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2137 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2138 true /*f32bit*/,
2139 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2140 }
2141# endif
2142#endif /* VBOX_WITH_HGCM */
2143 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2144 {
2145 CHECKRET_MIN_SIZE("LOG", 1);
2146 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2147 }
2148 else
2149 {
2150 switch (iFunction)
2151 {
2152 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2153 CHECKRET_RING0("GETVMMDEVPORT");
2154 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2155 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2156 break;
2157
2158 case VBOXGUEST_IOCTL_WAITEVENT:
2159 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2160 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2161 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2162 break;
2163
2164 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2165 if (cbData != 0)
2166 rc = VERR_INVALID_PARAMETER;
2167 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2168 break;
2169
2170 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2171 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2172 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2173 break;
2174
2175#ifdef VBOX_WITH_HGCM
2176 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2177# ifdef RT_ARCH_AMD64
2178 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2179# endif
2180 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2181 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2182 break;
2183
2184 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2185# ifdef RT_ARCH_AMD64
2186 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2187# endif
2188 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2189 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2190 break;
2191
2192 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2193 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2194 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2195 break;
2196#endif /* VBOX_WITH_HGCM */
2197
2198 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2199 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2200 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2201 break;
2202
2203 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2204 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2205 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2206 break;
2207
2208#ifdef VBOX_WITH_VRDP_SESSION_HANDLING
2209 case VBOXGUEST_IOCTL_ENABLE_VRDP_SESSION:
2210 rc = VBoxGuestCommonIOCtl_EnableVRDPSession(pDevExt, pSession);
2211 break;
2212
2213 case VBOXGUEST_IOCTL_DISABLE_VRDP_SESSION:
2214 rc = VBoxGuestCommonIOCtl_DisableVRDPSession(pDevExt, pSession);
2215 break;
2216#endif /* VBOX_WITH_VRDP_SESSION_HANDLING */
2217
2218 default:
2219 {
2220 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2221 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2222 rc = VERR_NOT_SUPPORTED;
2223 break;
2224 }
2225 }
2226 }
2227
2228 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2229 return rc;
2230}
2231
2232
2233
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool                    fMousePositionChanged = false;
    RTSPINLOCKTMP           Tmp                   = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile  *pReq                  = pDevExt->pIrqAckEvents;
    int                     rc                    = 0;
    bool                    fOurIrq;

    /*
     * Make sure we've initalized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them.  This works
     *       fine as long as we never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         * Instead the pre-allocated ack request is posted directly to the
         * VMMDev request port; the device fills in rc and the event mask.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events    = 0;
        ASMCompilerBarrier();   /* make sure the writes above hit memory before the port write */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t        fEvents = pReq->events;
            PVBOXGUESTWAIT  pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                /* Wake every waiter whose request the host has marked done. */
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             * Merge in previously undelivered events; each event bit is handed
             * to at most one waiter, and leftovers are stored back as pending.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
2355
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette