VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 31364

Last change on this file since 31364 was 31364, checked in by vboxsync, 15 years ago

Some more fixes for Guest Additions version lookup/status; moved some duplicate helper function to VbglR0.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 81.1 KB
Line 
1/* $Id: VBoxGuest.cpp 31364 2010-08-04 16:44:20Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
25#include <VBox/log.h>
26#include <iprt/mem.h>
27#include <iprt/time.h>
28#include <iprt/memobj.h>
29#include <iprt/asm.h>
30#include <iprt/asm-amd64-x86.h>
31#include <iprt/string.h>
32#include <iprt/process.h>
33#include <iprt/assert.h>
34#include <iprt/param.h>
35#ifdef VBOX_WITH_HGCM
36# include <iprt/thread.h>
37#endif
38#include "version-generated.h"
39#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
40# include "revision-generated.h"
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52/*******************************************************************************
53* Global Variables *
54*******************************************************************************/
55static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
56
57
58
59/**
60 * Reserves memory in which the VMM can relocate any guest mappings
61 * that are floating around.
62 *
63 * This operation is a little bit tricky since the VMM might not accept
64 * just any address because of address clashes between the three contexts
65 * it operates in, so use a small stack to perform this operation.
66 *
67 * @returns VBox status code (ignored).
68 * @param pDevExt The device extension.
69 */
70static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
71{
72 /*
73 * Query the required space.
74 */
75 VMMDevReqHypervisorInfo *pReq;
76 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
77 if (RT_FAILURE(rc))
78 return rc;
79 pReq->hypervisorStart = 0;
80 pReq->hypervisorSize = 0;
81 rc = VbglGRPerform(&pReq->header);
82 if (RT_FAILURE(rc)) /* this shouldn't happen! */
83 {
84 VbglGRFree(&pReq->header);
85 return rc;
86 }
87
88 /*
89 * The VMM will report back if there is nothing it wants to map, like for
90 * insance in VT-x and AMD-V mode.
91 */
92 if (pReq->hypervisorSize == 0)
93 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
94 else
95 {
96 /*
97 * We have to try several times since the host can be picky
98 * about certain addresses.
99 */
100 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
101 uint32_t cbHypervisor = pReq->hypervisorSize;
102 RTR0MEMOBJ ahTries[5];
103 uint32_t iTry;
104 bool fBitched = false;
105 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
106 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
107 {
108 /*
109 * Reserve space, or if that isn't supported, create a object for
110 * some fictive physical memory and map that in to kernel space.
111 *
112 * To make the code a bit uglier, most systems cannot help with
113 * 4MB alignment, so we have to deal with that in addition to
114 * having two ways of getting the memory.
115 */
116 uint32_t uAlignment = _4M;
117 RTR0MEMOBJ hObj;
118 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
119 if (rc == VERR_NOT_SUPPORTED)
120 {
121 uAlignment = PAGE_SIZE;
122 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
123 }
124 if (rc == VERR_NOT_SUPPORTED)
125 {
126 if (hFictive == NIL_RTR0MEMOBJ)
127 {
128 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
129 if (RT_FAILURE(rc))
130 break;
131 hFictive = hObj;
132 }
133 uAlignment = _4M;
134 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
135 if (rc == VERR_NOT_SUPPORTED)
136 {
137 uAlignment = PAGE_SIZE;
138 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
139 }
140 }
141 if (RT_FAILURE(rc))
142 {
143 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
144 rc, cbHypervisor, uAlignment, iTry));
145 fBitched = true;
146 break;
147 }
148
149 /*
150 * Try set it.
151 */
152 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
153 pReq->header.rc = VERR_INTERNAL_ERROR;
154 pReq->hypervisorSize = cbHypervisor;
155 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
156 if ( uAlignment == PAGE_SIZE
157 && pReq->hypervisorStart & (_4M - 1))
158 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
159 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
160
161 rc = VbglGRPerform(&pReq->header);
162 if (RT_SUCCESS(rc))
163 {
164 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
165 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
166 RTR0MemObjAddress(pDevExt->hGuestMappings),
167 RTR0MemObjSize(pDevExt->hGuestMappings),
168 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
169 break;
170 }
171 ahTries[iTry] = hObj;
172 }
173
174 /*
175 * Cleanup failed attempts.
176 */
177 while (iTry-- > 0)
178 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
179 if ( RT_FAILURE(rc)
180 && hFictive != NIL_RTR0PTR)
181 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
182 if (RT_FAILURE(rc) && !fBitched)
183 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
184 }
185 VbglGRFree(&pReq->header);
186
187 /*
188 * We ignore failed attempts for now.
189 */
190 return VINF_SUCCESS;
191}
192
193
194/**
195 * Undo what vboxGuestInitFixateGuestMappings did.
196 *
197 * @param pDevExt The device extension.
198 */
199static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
200{
201 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
202 {
203 /*
204 * Tell the host that we're going to free the memory we reserved for
205 * it, the free it up. (Leak the memory if anything goes wrong here.)
206 */
207 VMMDevReqHypervisorInfo *pReq;
208 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
209 if (RT_SUCCESS(rc))
210 {
211 pReq->hypervisorStart = 0;
212 pReq->hypervisorSize = 0;
213 rc = VbglGRPerform(&pReq->header);
214 VbglGRFree(&pReq->header);
215 }
216 if (RT_SUCCESS(rc))
217 {
218 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
219 AssertRC(rc);
220 }
221 else
222 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
223
224 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
225 }
226}
227
228
229/**
230 * Sets the interrupt filter mask during initialization and termination.
231 *
232 * This will ASSUME that we're the ones in carge over the mask, so
233 * we'll simply clear all bits we don't set.
234 *
235 * @returns VBox status code (ignored).
236 * @param pDevExt The device extension.
237 * @param fMask The new mask.
238 */
239static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
240{
241 VMMDevCtlGuestFilterMask *pReq;
242 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
243 if (RT_SUCCESS(rc))
244 {
245 pReq->u32OrMask = fMask;
246 pReq->u32NotMask = ~fMask;
247 rc = VbglGRPerform(&pReq->header);
248 if (RT_FAILURE(rc))
249 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
250 VbglGRFree(&pReq->header);
251 }
252 return rc;
253}
254
255
256/**
257 * Inflate the balloon by one chunk represented by an R0 memory object.
258 *
259 * The caller owns the balloon mutex.
260 *
261 * @returns IPRT status code.
262 * @param pMemObj Pointer to the R0 memory object.
263 * @param pReq The pre-allocated request for performing the VMMDev call.
264 */
265static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
266{
267 uint32_t iPage;
268 int rc;
269
270 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
271 {
272 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
273 pReq->aPhysPage[iPage] = phys;
274 }
275
276 /* Protect this memory from being accessed. Doesn't work on every platform and probably
277 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
278 * done when object is freed. */
279 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
280
281 pReq->fInflate = true;
282 pReq->header.size = cbChangeMemBalloonReq;
283 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
284
285 rc = VbglGRPerform(&pReq->header);
286 if (RT_FAILURE(rc))
287 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
288 return rc;
289}
290
291
292/**
293 * Deflate the balloon by one chunk - info the host and free the memory object.
294 *
295 * The caller owns the balloon mutex.
296 *
297 * @returns IPRT status code.
298 * @param pMemObj Pointer to the R0 memory object.
299 * The memory object will be freed afterwards.
300 * @param pReq The pre-allocated request for performing the VMMDev call.
301 */
302static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
303{
304 uint32_t iPage;
305 int rc;
306
307 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
308 {
309 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
310 pReq->aPhysPage[iPage] = phys;
311 }
312
313 pReq->fInflate = false;
314 pReq->header.size = cbChangeMemBalloonReq;
315 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
316
317 rc = VbglGRPerform(&pReq->header);
318 if (RT_FAILURE(rc))
319 {
320 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
321 return rc;
322 }
323
324 /* undo previous protec call, ignore rc for reasons stated there. */
325 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
326 /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */
327
328 rc = RTR0MemObjFree(*pMemObj, true);
329 if (RT_FAILURE(rc))
330 {
331 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
332 return rc;
333 }
334
335 *pMemObj = NIL_RTR0MEMOBJ;
336 return VINF_SUCCESS;
337}
338
339
340/**
341 * Inflate/deflate the memory balloon and notify the host.
342 *
343 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
344 * the mutex.
345 *
346 * @returns VBox status code.
347 * @param pDevExt The device extension.
348 * @param pSession The session.
349 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
350 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
351 * (VINF_SUCCESS if set).
352 */
353static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
354{
355 int rc = VINF_SUCCESS;
356
357 if (pDevExt->MemBalloon.fUseKernelAPI)
358 {
359 VMMDevChangeMemBalloon *pReq;
360 uint32_t i;
361
362 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
363 {
364 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
365 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
366 return VERR_INVALID_PARAMETER;
367 }
368
369 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
370 return VINF_SUCCESS; /* nothing to do */
371
372 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
373 && !pDevExt->MemBalloon.paMemObj)
374 {
375 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
376 if (!pDevExt->MemBalloon.paMemObj)
377 {
378 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
379 return VERR_NO_MEMORY;
380 }
381 }
382
383 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
384 if (RT_FAILURE(rc))
385 return rc;
386
387 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
388 {
389 /* inflate */
390 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
391 {
392 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
393 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
394 if (RT_FAILURE(rc))
395 {
396 if (rc == VERR_NOT_SUPPORTED)
397 {
398 /* not supported -- fall back to the R3-allocated memory. */
399 rc = VINF_SUCCESS;
400 pDevExt->MemBalloon.fUseKernelAPI = false;
401 Assert(pDevExt->MemBalloon.cChunks == 0);
402 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
403 }
404 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
405 * cannot allocate more memory => don't try further, just stop here */
406 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
407 break;
408 }
409
410 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
411 if (RT_FAILURE(rc))
412 {
413 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
414 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
415 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
416 break;
417 }
418 pDevExt->MemBalloon.cChunks++;
419 }
420 }
421 else
422 {
423 /* deflate */
424 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
425 {
426 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
427 if (RT_FAILURE(rc))
428 {
429 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
430 break;
431 }
432 pDevExt->MemBalloon.cChunks--;
433 }
434 }
435
436 VbglGRFree(&pReq->header);
437 }
438
439 /*
440 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
441 * the balloon changes via the other API.
442 */
443 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
444
445 return rc;
446}
447
448
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (not referenced by the current code).
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject growth beyond the host-reported maximum; cMaxChunks == 0
           means the balloon was never queried, so inflating is also invalid. */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the chunk tracking array and mark every slot free. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Deflating an empty balloon makes no sense. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflation we also remember the first free slot; for deflation we need
     * the slot holding the chunk at u64ChunkAddr.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        /* NOTE(review): this calls RTR0MemObjAddressR3 on NIL slots too --
           presumably that returns a value that never matches a real chunk
           address; confirm against the IPRT memobj API. */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the user pages of the chunk into the memory object, then hand
           the chunk to the host.  On failure the object is freed again so the
           slot stays usable. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        /* vboxGuestBalloonDeflate frees the memory object and NILs the slot. */
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
565
566
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or a driver unload, pSession == NULL) may
       tear the balloon down. */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate from the top down; on failure the remaining chunks
                   are leaked deliberately (see the LogRel below). */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc.  Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc).  Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
614
615
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present).  Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension.  Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     * A bogus region is not fatal; we just live without it.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* EventSpinlock may or may not have been created at this point. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.  Each step's failure path unwinds everything done so
     * far before returning.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
            if (RT_SUCCESS(rc))
            {
                /*
                 * Disable guest graphics capability by default. The guest specific
                 * graphics driver will re-enable this when it is necessary.
                 */
                rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                if (RT_SUCCESS(rc))
                {
                    /* Failure of the mapping fixation is deliberately ignored. */
                    vboxGuestInitFixateGuestMappings(pDevExt);

                    /* A failed guest-info report is logged but not fatal either. */
                    rc = VbglR0MiscReportGuestInfo(enmOSType);
                    if (RT_FAILURE(rc))
                        LogRel(("VBoxGuestInitDevExt: VbglR0MiscReportGuestInfo failed, rc=%Rrc\n", rc));

                    Log(("VBoxGuestInitDevExt: returns success\n"));
                    return VINF_SUCCESS;
                }

                LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Common failure cleanup for the sync primitives created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
774
775
776/**
777 * Deletes all the items in a wait chain.
778 * @param pWait The head of the chain.
779 */
780static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
781{
782 while (pList->pHead)
783 {
784 int rc2;
785 PVBOXGUESTWAIT pWait = pList->pHead;
786 pList->pHead = pWait->pNext;
787
788 pWait->pNext = NULL;
789 pWait->pPrev = NULL;
790 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
791 pWait->Event = NIL_RTSEMEVENTMULTI;
792 pWait->pSession = NULL;
793 RTMemFree(pWait);
794 }
795 pList->pHead = NULL;
796 pList->pTail = NULL;
797}
798
799
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this when the driver is being unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
842
843
844/**
845 * Creates a VBoxGuest user session.
846 *
847 * The native code calls this when a ring-3 client opens the device.
848 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
849 *
850 * @returns VBox status code.
851 * @param pDevExt The device extension.
852 * @param ppSession Where to store the session on success.
853 */
854int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
855{
856 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
857 if (RT_UNLIKELY(!pSession))
858 {
859 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
860 return VERR_NO_MEMORY;
861 }
862
863 pSession->Process = RTProcSelf();
864 pSession->R0Process = RTR0ProcHandleSelf();
865 pSession->pDevExt = pDevExt;
866
867 *ppSession = pSession;
868 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
869 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
870 return VINF_SUCCESS;
871}
872
873
874/**
875 * Creates a VBoxGuest kernel session.
876 *
877 * The native code calls this when a ring-0 client connects to the device.
878 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
879 *
880 * @returns VBox status code.
881 * @param pDevExt The device extension.
882 * @param ppSession Where to store the session on success.
883 */
884int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
885{
886 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
887 if (RT_UNLIKELY(!pSession))
888 {
889 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
890 return VERR_NO_MEMORY;
891 }
892
893 pSession->Process = NIL_RTPROCESS;
894 pSession->R0Process = NIL_RTR0PROCESS;
895 pSession->pDevExt = pDevExt;
896
897 *ppSession = pSession;
898 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
899 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
900 return VINF_SUCCESS;
901}
902
903
904
905/**
906 * Closes a VBoxGuest session.
907 *
908 * @param pDevExt The device extension.
909 * @param pSession The session to close (and free).
910 */
911void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
912{
913 unsigned i; NOREF(i);
914 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
915 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
916
917#ifdef VBOX_WITH_HGCM
918 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
919 if (pSession->aHGCMClientIds[i])
920 {
921 VBoxGuestHGCMDisconnectInfo Info;
922 Info.result = 0;
923 Info.u32ClientID = pSession->aHGCMClientIds[i];
924 pSession->aHGCMClientIds[i] = 0;
925 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
926 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
927 }
928#endif
929
930 pSession->pDevExt = NULL;
931 pSession->Process = NIL_RTPROCESS;
932 pSession->R0Process = NIL_RTR0PROCESS;
933 vboxGuestCloseMemBalloon(pDevExt, pSession);
934 RTMemFree(pSession);
935}
936
937
938/**
939 * Links the wait-for-event entry into the tail of the given list.
940 *
941 * @param pList The list to link it into.
942 * @param pWait The wait for event entry to append.
943 */
944DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
945{
946 const PVBOXGUESTWAIT pTail = pList->pTail;
947 pWait->pNext = NULL;
948 pWait->pPrev = pTail;
949 if (pTail)
950 pTail->pNext = pWait;
951 else
952 pList->pHead = pWait;
953 pList->pTail = pWait;
954}
955
956
957/**
958 * Unlinks the wait-for-event entry.
959 *
960 * @param pList The list to unlink it from.
961 * @param pWait The wait for event entry to unlink.
962 */
963DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
964{
965 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
966 const PVBOXGUESTWAIT pNext = pWait->pNext;
967 if (pNext)
968 pNext->pPrev = pPrev;
969 else
970 pList->pTail = pPrev;
971 if (pPrev)
972 pPrev->pNext = pNext;
973 else
974 pList->pHead = pNext;
975}
976
977
978/**
979 * Allocates a wiat-for-event entry.
980 *
981 * @returns The wait-for-event entry.
982 * @param pDevExt The device extension.
983 * @param pSession The session that's allocating this. Can be NULL.
984 */
985static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
986{
987 /*
988 * Allocate it one way or the other.
989 */
990 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
991 if (pWait)
992 {
993 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
994 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
995
996 pWait = pDevExt->FreeList.pTail;
997 if (pWait)
998 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
999
1000 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1001 }
1002 if (!pWait)
1003 {
1004 static unsigned s_cErrors = 0;
1005 int rc;
1006
1007 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1008 if (!pWait)
1009 {
1010 if (s_cErrors++ < 32)
1011 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1012 return NULL;
1013 }
1014
1015 rc = RTSemEventMultiCreate(&pWait->Event);
1016 if (RT_FAILURE(rc))
1017 {
1018 if (s_cErrors++ < 32)
1019 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1020 RTMemFree(pWait);
1021 return NULL;
1022 }
1023 }
1024
1025 /*
1026 * Zero members just as an precaution.
1027 */
1028 pWait->pNext = NULL;
1029 pWait->pPrev = NULL;
1030 pWait->fReqEvents = 0;
1031 pWait->fResEvents = 0;
1032 pWait->pSession = pSession;
1033#ifdef VBOX_WITH_HGCM
1034 pWait->pHGCMReq = NULL;
1035#endif
1036 RTSemEventMultiReset(pWait->Event);
1037 return pWait;
1038}
1039
1040
1041/**
1042 * Frees the wait-for-event entry.
1043 * The caller must own the wait spinlock!
1044 *
1045 * @param pDevExt The device extension.
1046 * @param pWait The wait-for-event entry to free.
1047 */
1048static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1049{
1050 pWait->fReqEvents = 0;
1051 pWait->fResEvents = 0;
1052#ifdef VBOX_WITH_HGCM
1053 pWait->pHGCMReq = NULL;
1054#endif
1055 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1056}
1057
1058
1059/**
1060 * Frees the wait-for-event entry.
1061 *
1062 * @param pDevExt The device extension.
1063 * @param pWait The wait-for-event entry to free.
1064 */
1065static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1066{
1067 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1068 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1069 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1070 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1071}
1072
1073
1074/**
1075 * Modifies the guest capabilities.
1076 *
1077 * Should be called during driver init and termination.
1078 *
1079 * @returns VBox status code.
1080 * @param fOr The Or mask (what to enable).
1081 * @param fNot The Not mask (what to disable).
1082 */
1083int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1084{
1085 VMMDevReqGuestCapabilities2 *pReq;
1086 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1087 if (RT_FAILURE(rc))
1088 {
1089 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1090 sizeof(*pReq), sizeof(*pReq), rc));
1091 return rc;
1092 }
1093
1094 pReq->u32OrMask = fOr;
1095 pReq->u32NotMask = fNot;
1096
1097 rc = VbglGRPerform(&pReq->header);
1098 if (RT_FAILURE(rc))
1099 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1100
1101 VbglGRFree(&pReq->header);
1102 return rc;
1103}
1104
1105
1106/**
1107 * Implements the fast (no input or output) type of IOCtls.
1108 *
1109 * This is currently just a placeholder stub inherited from the support driver code.
1110 *
1111 * @returns VBox status code.
1112 * @param iFunction The IOCtl function number.
1113 * @param pDevExt The device extension.
1114 * @param pSession The session.
1115 */
1116int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1117{
1118 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1119
1120 NOREF(iFunction);
1121 NOREF(pDevExt);
1122 NOREF(pSession);
1123 return VERR_NOT_SUPPORTED;
1124}
1125
1126
1127/**
1128 * Return the VMM device port.
1129 *
1130 * returns IPRT status code.
1131 * @param pDevExt The device extension.
1132 * @param pInfo The request info.
1133 * @param pcbDataReturned (out) contains the number of bytes to return.
1134 */
1135static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1136{
1137 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1138 pInfo->portAddress = pDevExt->IOPortBase;
1139 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1140 if (pcbDataReturned)
1141 *pcbDataReturned = sizeof(*pInfo);
1142 return VINF_SUCCESS;
1143}
1144
1145
1146/**
1147 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1148 * The caller enters the spinlock, we may or may not leave it.
1149 *
1150 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1151 */
1152DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1153 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
1154{
1155 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1156 if (fMatches)
1157 {
1158 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1159 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
1160
1161 pInfo->u32EventFlagsOut = fMatches;
1162 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1163 if (fReqEvents & ~((uint32_t)1 << iEvent))
1164 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1165 else
1166 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1167 return VINF_SUCCESS;
1168 }
1169 return VERR_TIMEOUT;
1170}
1171
1172
/**
 * Handles VBOXGUEST_IOCTL_WAITEVENT: waits for one of the requested host
 * events to become pending, or until the timeout expires.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_TIMEOUT, VERR_INTERRUPTED,
 *          VERR_SEM_DESTROYED or VERR_INTERNAL_ERROR).
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session (recorded in the wait entry).
 * @param   pInfo           The wait request; event mask and timeout in,
 *                          matched events and result code out.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    /* Initialize the output to "error, nothing matched" up front. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1; /* -1 when the mask is empty */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc; /* the worker already released the spinlock */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll-only: report timeout without blocking. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* Block until the ISR signals the entry or the timeout expires. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents; /* snapshot before the entry is recycled */
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     * Note: fResEvents == UINT32_MAX marks the entry as cancelled (see
     *       VBoxGuestCommonIOCtl_CancelAllWaitEvents).
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        /* The wait succeeded but no events were recorded - shouldn't happen. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1294
1295
1296static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1297{
1298 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1299#if defined(RT_OS_SOLARIS)
1300 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1301#endif
1302 PVBOXGUESTWAIT pWait;
1303 int rc = 0;
1304
1305 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1306
1307 /*
1308 * Walk the event list and wake up anyone with a matching session.
1309 *
1310 * Note! On Solaris we have to do really ugly stuff here because
1311 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1312 * The hack is racy, but what we can we do... (Eliminate this
1313 * termination hack, perhaps?)
1314 */
1315#if defined(RT_OS_SOLARIS)
1316 RTThreadPreemptDisable(&State);
1317 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1318 do
1319 {
1320 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1321 if ( pWait->pSession == pSession
1322 && pWait->fResEvents != UINT32_MAX)
1323 {
1324 RTSEMEVENTMULTI hEvent = pWait->Event;
1325 pWait->fResEvents = UINT32_MAX;
1326 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1327 /* HACK ALRET! This races wakeup + reuse! */
1328 rc |= RTSemEventMultiSignal(hEvent);
1329 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1330 break;
1331 }
1332 } while (pWait);
1333 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1334 RTThreadPreemptDisable(&State);
1335#else
1336 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1337 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1338 if (pWait->pSession == pSession)
1339 {
1340 pWait->fResEvents = UINT32_MAX;
1341 rc |= RTSemEventMultiSignal(pWait->Event);
1342 }
1343 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1344#endif
1345 Assert(rc == 0);
1346
1347 return VINF_SUCCESS;
1348}
1349
1350
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST: forwards a generic VMMDev request from
 * user land to the host.
 *
 * The request is validated, copied into the physical memory heap, performed,
 * and the (possibly updated) request is copied back to the caller's buffer.
 *
 * @returns VBox status code; on VMMDev-level failure, the request's own
 *          status (pReqCopy->rc) is returned.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pReqHdr         The request header (input/output buffer).
 * @param   cbData          Size of the caller's buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    int rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (host-updated) request back to the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* The transport worked but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1424
1425
1426static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1427{
1428 VMMDevCtlGuestFilterMask *pReq;
1429 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1430 if (RT_FAILURE(rc))
1431 {
1432 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1433 sizeof(*pReq), sizeof(*pReq), rc));
1434 return rc;
1435 }
1436
1437 pReq->u32OrMask = pInfo->u32OrMask;
1438 pReq->u32NotMask = pInfo->u32NotMask;
1439 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1440 rc = VbglGRPerform(&pReq->header);
1441 if (RT_FAILURE(rc))
1442 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1443
1444 VbglGRFree(&pReq->header);
1445 return rc;
1446}
1447
1448#ifdef VBOX_WITH_HGCM
1449
1450AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1451
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks the calling thread until the host marks the HGCM request as done
 * (VBOX_HGCM_REQ_DONE), the timeout expires, or - in the interruptible case -
 * the thread is interrupted.
 *
 * @returns VBox status code (VINF_SUCCESS, VERR_INTERRUPTED, VERR_TIMEOUT
 *          or VERR_SEM_DESTROYED).
 * @param   pHdr            The HGCM request header (shared with the host).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Timeout in milliseconds, or RT_INDEFINITE_WAIT.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* back off briefly before retrying the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc; /* driver unload in progress; get out ASAP, leaking the entry */

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1524
1525
1526/**
1527 * This is a callback for dealing with async waits.
1528 *
1529 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1530 */
1531static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1532{
1533 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1534 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1535 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1536 pDevExt,
1537 false /* fInterruptible */,
1538 u32User /* cMillies */);
1539}
1540
1541
1542/**
1543 * This is a callback for dealing with async waits with a timeout.
1544 *
1545 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1546 */
1547static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1548 void *pvUser, uint32_t u32User)
1549{
1550 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1551 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1552 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1553 pDevExt,
1554 true /* fInterruptible */,
1555 u32User /* cMillies */ );
1556
1557}
1558
1559
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT: connects to an HGCM service and
 * records the new client id in the session's client id table so it can be
 * disconnected again when the session closes.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   pInfo           The connect request (input/output).
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: log (rate limited) and roll the connect back. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1611
1612
1613static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1614 size_t *pcbDataReturned)
1615{
1616 /*
1617 * Validate the client id and invalidate its entry while we're in the call.
1618 */
1619 const uint32_t u32ClientId = pInfo->u32ClientID;
1620 unsigned i;
1621 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1622 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1623 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1624 if (pSession->aHGCMClientIds[i] == u32ClientId)
1625 {
1626 pSession->aHGCMClientIds[i] = UINT32_MAX;
1627 break;
1628 }
1629 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1630 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1631 {
1632 static unsigned s_cErrors = 0;
1633 if (s_cErrors++ > 32)
1634 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1635 return VERR_INVALID_HANDLE;
1636 }
1637
1638 /*
1639 * The VbglHGCMConnect call will invoke the callback if the HGCM
1640 * call is performed in an ASYNC fashion. The function is not able
1641 * to deal with cancelled requests.
1642 */
1643 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1644 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1645 if (RT_SUCCESS(rc))
1646 {
1647 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1648 if (pcbDataReturned)
1649 *pcbDataReturned = sizeof(*pInfo);
1650 }
1651
1652 /* Update the client id array according to the result. */
1653 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1654 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1655 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1656 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1657
1658 return rc;
1659}
1660
1661
1662static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1663 PVBOXGUESTSESSION pSession,
1664 VBoxGuestHGCMCallInfo *pInfo,
1665 uint32_t cMillies, bool fInterruptible, bool f32bit,
1666 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1667{
1668 /*
1669 * Some more validations.
1670 */
1671 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1672 {
1673 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1674 return VERR_INVALID_PARAMETER;
1675 }
1676 size_t cbActual = cbExtra + sizeof(*pInfo);
1677#ifdef RT_ARCH_AMD64
1678 if (f32bit)
1679 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1680 else
1681#endif
1682 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1683 if (cbData < cbActual)
1684 {
1685 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1686 cbData, cbActual));
1687 return VERR_INVALID_PARAMETER;
1688 }
1689
1690 /*
1691 * Validate the client id.
1692 */
1693 const uint32_t u32ClientId = pInfo->u32ClientID;
1694 unsigned i;
1695 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1696 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1697 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1698 if (pSession->aHGCMClientIds[i] == u32ClientId)
1699 break;
1700 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1701 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1702 {
1703 static unsigned s_cErrors = 0;
1704 if (s_cErrors++ > 32)
1705 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1706 return VERR_INVALID_HANDLE;
1707 }
1708
1709 /*
1710 * The VbglHGCMCall call will invoke the callback if the HGCM
1711 * call is performed in an ASYNC fashion. This function can
1712 * deal with cancelled requests, so we let user more requests
1713 * be interruptible (should add a flag for this later I guess).
1714 */
1715 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1716 int rc;
1717 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1718#ifdef RT_ARCH_AMD64
1719 if (f32bit)
1720 {
1721 if (fInterruptible)
1722 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1723 else
1724 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1725 }
1726 else
1727#endif
1728 {
1729 if (fInterruptible)
1730 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1731 else
1732 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1733 }
1734 if (RT_SUCCESS(rc))
1735 {
1736 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1737 if (pcbDataReturned)
1738 *pcbDataReturned = cbActual;
1739 }
1740 else
1741 {
1742 if ( rc != VERR_INTERRUPTED
1743 && rc != VERR_TIMEOUT)
1744 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1745 else
1746 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1747 }
1748 return rc;
1749}
1750
1751
1752/**
1753 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1754 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1755 *
1756 * @param pDevExt The device extension.
1757 * @param pu32ClientId The client id.
1758 * @param pcbDataReturned Where to store the amount of returned data. Can
1759 * be NULL.
1760 */
1761static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1762{
1763 int rc;
1764 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1765
1766
1767 /*
1768 * If there is an old client, try disconnect it first.
1769 */
1770 if (pDevExt->u32ClipboardClientId != 0)
1771 {
1772 VBoxGuestHGCMDisconnectInfo Info;
1773 Info.result = VERR_WRONG_ORDER;
1774 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1775 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1776 if (RT_SUCCESS(rc))
1777 {
1778 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1779 return rc;
1780 }
1781 if (RT_FAILURE((int32_t)Info.result))
1782 {
1783 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1784 return Info.result;
1785 }
1786 pDevExt->u32ClipboardClientId = 0;
1787 }
1788
1789 /*
1790 * Try connect.
1791 */
1792 VBoxGuestHGCMConnectInfo Info;
1793 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1794 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1795 Info.u32ClientID = 0;
1796 Info.result = VERR_WRONG_ORDER;
1797
1798 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1799 if (RT_FAILURE(rc))
1800 {
1801 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1802 return rc;
1803 }
1804 if (RT_FAILURE(Info.result))
1805 {
1806 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1807 return rc;
1808 }
1809
1810 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1811
1812 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1813 *pu32ClientId = Info.u32ClientID;
1814 if (pcbDataReturned)
1815 *pcbDataReturned = sizeof(uint32_t);
1816
1817 return VINF_SUCCESS;
1818}
1819
1820#endif /* VBOX_WITH_HGCM */
1821
1822/**
1823 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1824 *
1825 * Ask the host for the size of the balloon and try to set it accordingly. If
1826 * this approach fails because it's not supported, return with fHandleInR3 set
1827 * and let the user land supply memory we can lock via the other ioctl.
1828 *
1829 * @returns VBox status code.
1830 *
1831 * @param pDevExt The device extension.
1832 * @param pSession The session.
1833 * @param pInfo The output buffer.
1834 * @param pcbDataReturned Where to store the amount of returned data. Can
1835 * be NULL.
1836 */
1837static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1838 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
1839{
1840 VMMDevGetMemBalloonChangeRequest *pReq;
1841 int rc;
1842
1843 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
1844 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1845 AssertRCReturn(rc, rc);
1846
1847 /*
1848 * The first user trying to query/change the balloon becomes the
1849 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1850 */
1851 if ( pDevExt->MemBalloon.pOwner != pSession
1852 && pDevExt->MemBalloon.pOwner == NULL)
1853 pDevExt->MemBalloon.pOwner = pSession;
1854
1855 if (pDevExt->MemBalloon.pOwner == pSession)
1856 {
1857 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
1858 if (RT_SUCCESS(rc))
1859 {
1860 /*
1861 * This is a response to that event. Setting this bit means that
1862 * we request the value from the host and change the guest memory
1863 * balloon according to this value.
1864 */
1865 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1866 rc = VbglGRPerform(&pReq->header);
1867 if (RT_SUCCESS(rc))
1868 {
1869 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
1870 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
1871
1872 pInfo->cBalloonChunks = pReq->cBalloonChunks;
1873 pInfo->fHandleInR3 = false;
1874
1875 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
1876 /* Ignore various out of memory failures. */
1877 if ( rc == VERR_NO_MEMORY
1878 || rc == VERR_NO_PHYS_MEMORY
1879 || rc == VERR_NO_CONT_MEMORY)
1880 rc = VINF_SUCCESS;
1881
1882 if (pcbDataReturned)
1883 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
1884 }
1885 else
1886 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
1887 VbglGRFree(&pReq->header);
1888 }
1889 }
1890 else
1891 rc = VERR_PERMISSION_DENIED;
1892
1893 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1894 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
1895 return rc;
1896}
1897
1898
1899/**
1900 * Handle a request for changing the memory balloon.
1901 *
1902 * @returns VBox status code.
1903 *
1904 * @param pDevExt The device extention.
1905 * @param pSession The session.
1906 * @param pInfo The change request structure (input).
1907 * @param pcbDataReturned Where to store the amount of returned data. Can
1908 * be NULL.
1909 */
1910static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1911 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1912{
1913 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1914 AssertRCReturn(rc, rc);
1915
1916 if (!pDevExt->MemBalloon.fUseKernelAPI)
1917 {
1918 /*
1919 * The first user trying to query/change the balloon becomes the
1920 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1921 */
1922 if ( pDevExt->MemBalloon.pOwner != pSession
1923 && pDevExt->MemBalloon.pOwner == NULL)
1924 pDevExt->MemBalloon.pOwner = pSession;
1925
1926 if (pDevExt->MemBalloon.pOwner == pSession)
1927 {
1928 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1929 if (pcbDataReturned)
1930 *pcbDataReturned = 0;
1931 }
1932 else
1933 rc = VERR_PERMISSION_DENIED;
1934 }
1935 else
1936 rc = VERR_PERMISSION_DENIED;
1937
1938 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1939 return rc;
1940}
1941
1942
1943/**
1944 * Guest backdoor logging.
1945 *
1946 * @returns VBox status code.
1947 *
1948 * @param pch The log message (need not be NULL terminated).
1949 * @param cbData Size of the buffer.
1950 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1951 */
1952static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1953{
1954 NOREF(pch);
1955 NOREF(cbData);
1956 Log(("%.*s", cbData, pch));
1957 if (pcbDataReturned)
1958 *pcbDataReturned = 0;
1959 return VINF_SUCCESS;
1960}
1961
1962
1963/**
1964 * Common IOCtl for user to kernel and kernel to kernel communcation.
1965 *
1966 * This function only does the basic validation and then invokes
1967 * worker functions that takes care of each specific function.
1968 *
1969 * @returns VBox status code.
1970 *
1971 * @param iFunction The requested function.
1972 * @param pDevExt The device extension.
1973 * @param pSession The client session.
1974 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1975 * @param cbData The max size of the data buffer.
1976 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1977 */
1978int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1979 void *pvData, size_t cbData, size_t *pcbDataReturned)
1980{
1981 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1982 iFunction, pDevExt, pSession, pvData, cbData));
1983
1984 /*
1985 * Make sure the returned data size is set to zero.
1986 */
1987 if (pcbDataReturned)
1988 *pcbDataReturned = 0;
1989
1990 /*
1991 * Define some helper macros to simplify validation.
1992 */
1993#define CHECKRET_RING0(mnemonic) \
1994 do { \
1995 if (pSession->R0Process != NIL_RTR0PROCESS) \
1996 { \
1997 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1998 pSession->Process, (uintptr_t)pSession->R0Process)); \
1999 return VERR_PERMISSION_DENIED; \
2000 } \
2001 } while (0)
2002#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2003 do { \
2004 if (cbData < (cbMin)) \
2005 { \
2006 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2007 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2008 return VERR_BUFFER_OVERFLOW; \
2009 } \
2010 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2011 { \
2012 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2013 return VERR_INVALID_POINTER; \
2014 } \
2015 } while (0)
2016
2017
2018 /*
2019 * Deal with variably sized requests first.
2020 */
2021 int rc = VINF_SUCCESS;
2022 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2023 {
2024 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2025 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2026 }
2027#ifdef VBOX_WITH_HGCM
2028 /*
2029 * These ones are a bit tricky.
2030 */
2031 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2032 {
2033 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2034 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2035 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2036 fInterruptible, false /*f32bit*/,
2037 0, cbData, pcbDataReturned);
2038 }
2039 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2040 {
2041 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2042 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2043 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2044 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2045 false /*f32bit*/,
2046 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2047 }
2048# ifdef RT_ARCH_AMD64
2049 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2050 {
2051 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2052 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2053 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2054 fInterruptible, true /*f32bit*/,
2055 0, cbData, pcbDataReturned);
2056 }
2057 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2058 {
2059 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2060 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2061 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2062 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2063 true /*f32bit*/,
2064 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2065 }
2066# endif
2067#endif /* VBOX_WITH_HGCM */
2068 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2069 {
2070 CHECKRET_MIN_SIZE("LOG", 1);
2071 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2072 }
2073 else
2074 {
2075 switch (iFunction)
2076 {
2077 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2078 CHECKRET_RING0("GETVMMDEVPORT");
2079 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2080 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2081 break;
2082
2083 case VBOXGUEST_IOCTL_WAITEVENT:
2084 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2085 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2086 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2087 break;
2088
2089 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2090 if (cbData != 0)
2091 rc = VERR_INVALID_PARAMETER;
2092 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2093 break;
2094
2095 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2096 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2097 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2098 break;
2099
2100#ifdef VBOX_WITH_HGCM
2101 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2102# ifdef RT_ARCH_AMD64
2103 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2104# endif
2105 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2106 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2107 break;
2108
2109 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2110# ifdef RT_ARCH_AMD64
2111 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2112# endif
2113 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2114 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2115 break;
2116
2117 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2118 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2119 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2120 break;
2121#endif /* VBOX_WITH_HGCM */
2122
2123 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2124 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2125 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2126 break;
2127
2128 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2129 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2130 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2131 break;
2132
2133 default:
2134 {
2135 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2136 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2137 rc = VERR_NOT_SUPPORTED;
2138 break;
2139 }
2140 }
2141 }
2142
2143 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2144 return rc;
2145}
2146
2147
2148
2149/**
2150 * Common interrupt service routine.
2151 *
2152 * This deals with events and with waking up thread waiting for those events.
2153 *
2154 * @returns true if it was our interrupt, false if it wasn't.
2155 * @param pDevExt The VBoxGuest device extension.
2156 */
2157bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
2158{
2159 bool fMousePositionChanged = false;
2160 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
2161 VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
2162 int rc = 0;
2163 bool fOurIrq;
2164
2165 /*
2166 * Make sure we've initalized the device extension.
2167 */
2168 if (RT_UNLIKELY(!pReq))
2169 return false;
2170
2171 /*
2172 * Enter the spinlock and check if it's our IRQ or not.
2173 *
2174 * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
2175 * so we're entering the spinlock without disabling them. This works
2176 * fine as long as we never called in a nested fashion.
2177 */
2178#if defined(RT_OS_SOLARIS)
2179 RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
2180#else
2181 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
2182#endif
2183 fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
2184 if (fOurIrq)
2185 {
2186 /*
2187 * Acknowlegde events.
2188 * We don't use VbglGRPerform here as it may take another spinlocks.
2189 */
2190 pReq->header.rc = VERR_INTERNAL_ERROR;
2191 pReq->events = 0;
2192 ASMCompilerBarrier();
2193 ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
2194 ASMCompilerBarrier(); /* paranoia */
2195 if (RT_SUCCESS(pReq->header.rc))
2196 {
2197 uint32_t fEvents = pReq->events;
2198 PVBOXGUESTWAIT pWait;
2199
2200 Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));
2201
2202 /*
2203 * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
2204 */
2205 if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
2206 {
2207 fMousePositionChanged = true;
2208 fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
2209 }
2210
2211#ifdef VBOX_WITH_HGCM
2212 /*
2213 * The HGCM event/list is kind of different in that we evaluate all entries.
2214 */
2215 if (fEvents & VMMDEV_EVENT_HGCM)
2216 {
2217 for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
2218 if ( !pWait->fResEvents
2219 && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
2220 {
2221 pWait->fResEvents = VMMDEV_EVENT_HGCM;
2222 rc |= RTSemEventMultiSignal(pWait->Event);
2223 }
2224 fEvents &= ~VMMDEV_EVENT_HGCM;
2225 }
2226#endif
2227
2228 /*
2229 * Normal FIFO waiter evaluation.
2230 */
2231 fEvents |= pDevExt->f32PendingEvents;
2232 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
2233 if ( (pWait->fReqEvents & fEvents)
2234 && !pWait->fResEvents)
2235 {
2236 pWait->fResEvents = pWait->fReqEvents & fEvents;
2237 fEvents &= ~pWait->fResEvents;
2238 rc |= RTSemEventMultiSignal(pWait->Event);
2239 if (!fEvents)
2240 break;
2241 }
2242 ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
2243 }
2244 else /* something is serious wrong... */
2245 Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
2246 pReq->header.rc, pReq->events));
2247 }
2248 else
2249 LogFlow(("VBoxGuestCommonISR: not ours\n"));
2250
2251 /*
2252 * Work the poll and async notification queues on OSes that implements that.
2253 * Do this outside the spinlock to prevent some recursive spinlocking.
2254 */
2255#if defined(RT_OS_SOLARIS)
2256 RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
2257#else
2258 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
2259#endif
2260
2261 if (fMousePositionChanged)
2262 {
2263 ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
2264 VBoxGuestNativeISRMousePollEvent(pDevExt);
2265 }
2266
2267 Assert(rc == 0);
2268 return fOurIrq;
2269}
2270
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette