VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 29246

Last change on this file since 29246 was 29046, checked in by vboxsync, 15 years ago

typo

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 83.4 KB
Line 
1/* $Id: VBoxGuest.cpp 29046 2010-05-04 20:42:39Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
25#include <VBox/log.h>
26#include <iprt/mem.h>
27#include <iprt/time.h>
28#include <iprt/memobj.h>
29#include <iprt/asm.h>
30#include <iprt/string.h>
31#include <iprt/process.h>
32#include <iprt/assert.h>
33#include <iprt/param.h>
34#ifdef VBOX_WITH_HGCM
35# include <iprt/thread.h>
36#endif
37#include "version-generated.h"
38#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
39# include "revision-generated.h"
40#endif
41
42
43/*******************************************************************************
44* Internal Functions *
45*******************************************************************************/
46#ifdef VBOX_WITH_HGCM
47static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
48#endif
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request including the full physical page array. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
55
56
57
58/**
59 * Reserves memory in which the VMM can relocate any guest mappings
60 * that are floating around.
61 *
62 * This operation is a little bit tricky since the VMM might not accept
63 * just any address because of address clashes between the three contexts
64 * it operates in, so use a small stack to perform this operation.
65 *
66 * @returns VBox status code (ignored).
67 * @param pDevExt The device extension.
68 */
69static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
70{
71 /*
72 * Query the required space.
73 */
74 VMMDevReqHypervisorInfo *pReq;
75 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
76 if (RT_FAILURE(rc))
77 return rc;
78 pReq->hypervisorStart = 0;
79 pReq->hypervisorSize = 0;
80 rc = VbglGRPerform(&pReq->header);
81 if (RT_FAILURE(rc)) /* this shouldn't happen! */
82 {
83 VbglGRFree(&pReq->header);
84 return rc;
85 }
86
87 /*
88 * The VMM will report back if there is nothing it wants to map, like for
89 * insance in VT-x and AMD-V mode.
90 */
91 if (pReq->hypervisorSize == 0)
92 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
93 else
94 {
95 /*
96 * We have to try several times since the host can be picky
97 * about certain addresses.
98 */
99 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
100 uint32_t cbHypervisor = pReq->hypervisorSize;
101 RTR0MEMOBJ ahTries[5];
102 uint32_t iTry;
103 bool fBitched = false;
104 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
105 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
106 {
107 /*
108 * Reserve space, or if that isn't supported, create a object for
109 * some fictive physical memory and map that in to kernel space.
110 *
111 * To make the code a bit uglier, most systems cannot help with
112 * 4MB alignment, so we have to deal with that in addition to
113 * having two ways of getting the memory.
114 */
115 uint32_t uAlignment = _4M;
116 RTR0MEMOBJ hObj;
117 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
118 if (rc == VERR_NOT_SUPPORTED)
119 {
120 uAlignment = PAGE_SIZE;
121 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
122 }
123 if (rc == VERR_NOT_SUPPORTED)
124 {
125 if (hFictive == NIL_RTR0MEMOBJ)
126 {
127 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
128 if (RT_FAILURE(rc))
129 break;
130 hFictive = hObj;
131 }
132 uAlignment = _4M;
133 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
134 if (rc == VERR_NOT_SUPPORTED)
135 {
136 uAlignment = PAGE_SIZE;
137 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
138 }
139 }
140 if (RT_FAILURE(rc))
141 {
142 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
143 rc, cbHypervisor, uAlignment, iTry));
144 fBitched = true;
145 break;
146 }
147
148 /*
149 * Try set it.
150 */
151 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
152 pReq->header.rc = VERR_INTERNAL_ERROR;
153 pReq->hypervisorSize = cbHypervisor;
154 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
155 if ( uAlignment == PAGE_SIZE
156 && pReq->hypervisorStart & (_4M - 1))
157 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
158 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
159
160 rc = VbglGRPerform(&pReq->header);
161 if (RT_SUCCESS(rc))
162 {
163 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
164 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
165 RTR0MemObjAddress(pDevExt->hGuestMappings),
166 RTR0MemObjSize(pDevExt->hGuestMappings),
167 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
168 break;
169 }
170 ahTries[iTry] = hObj;
171 }
172
173 /*
174 * Cleanup failed attempts.
175 */
176 while (iTry-- > 0)
177 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
178 if ( RT_FAILURE(rc)
179 && hFictive != NIL_RTR0PTR)
180 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
181 if (RT_FAILURE(rc) && !fBitched)
182 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
183 }
184 VbglGRFree(&pReq->header);
185
186 /*
187 * We ignore failed attempts for now.
188 */
189 return VINF_SUCCESS;
190}
191
192
193/**
194 * Undo what vboxGuestInitFixateGuestMappings did.
195 *
196 * @param pDevExt The device extension.
197 */
198static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
199{
200 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
201 {
202 /*
203 * Tell the host that we're going to free the memory we reserved for
204 * it, the free it up. (Leak the memory if anything goes wrong here.)
205 */
206 VMMDevReqHypervisorInfo *pReq;
207 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
208 if (RT_SUCCESS(rc))
209 {
210 pReq->hypervisorStart = 0;
211 pReq->hypervisorSize = 0;
212 rc = VbglGRPerform(&pReq->header);
213 VbglGRFree(&pReq->header);
214 }
215 if (RT_SUCCESS(rc))
216 {
217 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
218 AssertRC(rc);
219 }
220 else
221 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
222
223 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
224 }
225}
226
227
228/**
229 * Sets the interrupt filter mask during initialization and termination.
230 *
231 * This will ASSUME that we're the ones in carge over the mask, so
232 * we'll simply clear all bits we don't set.
233 *
234 * @returns VBox status code (ignored).
235 * @param pDevExt The device extension.
236 * @param fMask The new mask.
237 */
238static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
239{
240 VMMDevCtlGuestFilterMask *pReq;
241 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
242 if (RT_SUCCESS(rc))
243 {
244 pReq->u32OrMask = fMask;
245 pReq->u32NotMask = ~fMask;
246 rc = VbglGRPerform(&pReq->header);
247 if ( RT_FAILURE(rc)
248 || RT_FAILURE(pReq->header.rc))
249 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
250 rc, pReq->header.rc));
251 VbglGRFree(&pReq->header);
252 }
253 return rc;
254}
255
256
/**
 * Report guest information to the VMMDev.
 *
 * Performs two requests: first VMMDevReq_ReportGuestInfo (interface version
 * plus OS type), then on success VMMDevReq_ReportGuestInfo2 (detailed
 * additions version).  Older hosts that don't implement the second request
 * return VERR_NOT_IMPLEMENTED, which is deliberately treated as success.
 *
 * @returns VBox status code.
 * @param   pDevExt     The device extension.
 * @param   enmOSType   The OS type to report.
 */
static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
{
    VMMDevReportGuestInfo *pReq;
    int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
    if (RT_SUCCESS(rc))
    {
        pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
        pReq->guestInfo.osType = enmOSType;
        rc = VbglGRPerform(&pReq->header);
        /* A failing header.rc is only logged here, not propagated via rc. */
        if (    RT_FAILURE(rc)
            ||  RT_FAILURE(pReq->header.rc))
            LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
                    rc, pReq->header.rc));
        VbglGRFree(&pReq->header);
    }
    VMMDevReportGuestInfo2 *pReq2;
    if (RT_SUCCESS(rc))
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestInfo2);
    if (RT_SUCCESS(rc))
    {
        pReq2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
        pReq2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
        pReq2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
        pReq2->guestInfo.additionsRevision = VBOX_SVN_REV;
        pReq2->guestInfo.additionsFeatures = 0;
        RTStrCopy(pReq2->guestInfo.szName, sizeof(pReq2->guestInfo.szName), VBOX_VERSION_STRING);
        rc = VbglGRPerform(&pReq2->header);
        if (rc == VERR_NOT_IMPLEMENTED) /* compatibility with older hosts */
            rc = VINF_SUCCESS;
        if (    RT_FAILURE(rc)
            ||  RT_FAILURE(pReq2->header.rc))
            LogRel(("vboxGuestInitReportGuestInfo2: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
                    rc, pReq2->header.rc));
        VbglGRFree(&pReq2->header);
    }
    return rc;
}
301
302
303/**
304 * Inflate the balloon by one chunk represented by an R0 memory object.
305 *
306 * The caller owns the balloon mutex.
307 *
308 * @returns IPRT status code.
309 * @param pMemObj Pointer to the R0 memory object.
310 * @param pReq The pre-allocated request for performing the VMMDev call.
311 */
312static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
313{
314 uint32_t iPage;
315 int rc;
316
317 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
318 {
319 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
320 pReq->aPhysPage[iPage] = phys;
321 }
322
323 /* Protect this memory from being accessed. Doesn't work on every platform and probably
324 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
325 * done when object is freed. */
326 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
327
328 pReq->fInflate = true;
329 pReq->header.size = cbChangeMemBalloonReq;
330 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
331
332 rc = VbglGRPerform(&pReq->header);
333 if (RT_FAILURE(rc))
334 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
335 return rc;
336}
337
338
339/**
340 * Deflate the balloon by one chunk - info the host and free the memory object.
341 *
342 * The caller owns the balloon mutex.
343 *
344 * @returns IPRT status code.
345 * @param pMemObj Pointer to the R0 memory object.
346 * The memory object will be freed afterwards.
347 * @param pReq The pre-allocated request for performing the VMMDev call.
348 */
349static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
350{
351 uint32_t iPage;
352 int rc;
353
354 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
355 {
356 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
357 pReq->aPhysPage[iPage] = phys;
358 }
359
360 pReq->fInflate = false;
361 pReq->header.size = cbChangeMemBalloonReq;
362 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
363
364 rc = VbglGRPerform(&pReq->header);
365 if (RT_FAILURE(rc))
366 {
367 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
368 return rc;
369 }
370
371 /* undo previous protec call, ignore rc for reasons stated there. */
372 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
373 /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */
374
375 rc = RTR0MemObjFree(*pMemObj, true);
376 if (RT_FAILURE(rc))
377 {
378 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
379 return rc;
380 }
381
382 *pMemObj = NIL_RTR0MEMOBJ;
383 return VINF_SUCCESS;
384}
385
386
387/**
388 * Inflate/deflate the memory balloon and notify the host.
389 *
390 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
391 * the mutex.
392 *
393 * @returns VBox status code.
394 * @param pDevExt The device extension.
395 * @param pSession The session.
396 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
397 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
398 * (VINF_SUCCESS if set).
399 */
400static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
401{
402 int rc = VINF_SUCCESS;
403
404 if (pDevExt->MemBalloon.fUseKernelAPI)
405 {
406 VMMDevChangeMemBalloon *pReq;
407 uint32_t i;
408
409 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
410 {
411 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
412 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
413 return VERR_INVALID_PARAMETER;
414 }
415
416 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
417 return VINF_SUCCESS; /* nothing to do */
418
419 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
420 && !pDevExt->MemBalloon.paMemObj)
421 {
422 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
423 if (!pDevExt->MemBalloon.paMemObj)
424 {
425 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
426 return VERR_NO_MEMORY;
427 }
428 }
429
430 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
431 if (RT_FAILURE(rc))
432 return rc;
433
434 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
435 {
436 /* inflate */
437 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
438 {
439 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
440 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
441 if (RT_FAILURE(rc))
442 {
443 if (rc == VERR_NOT_SUPPORTED)
444 {
445 /* not supported -- fall back to the R3-allocated memory. */
446 rc = VINF_SUCCESS;
447 pDevExt->MemBalloon.fUseKernelAPI = false;
448 Assert(pDevExt->MemBalloon.cChunks == 0);
449 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
450 }
451 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
452 * cannot allocate more memory => don't try further, just stop here */
453 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
454 break;
455 }
456
457 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
458 if (RT_FAILURE(rc))
459 {
460 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
461 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
462 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
463 break;
464 }
465 pDevExt->MemBalloon.cChunks++;
466 }
467 }
468 else
469 {
470 /* deflate */
471 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
472 {
473 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
474 if (RT_FAILURE(rc))
475 {
476 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
477 break;
478 }
479 pDevExt->MemBalloon.cChunks--;
480 }
481 }
482
483 VbglGRFree(&pReq->header);
484 }
485
486 /*
487 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
488 * the balloon changes via the other API.
489 */
490 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
491
492 return rc;
493}
494
495
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session.
 * @param   u64ChunkAddr    The address of the chunk to add to / remove from the
 *                          balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse to grow past the host-imposed maximum; cMaxChunks is still 0
           when the balloon size hasn't been queried from the host yet. */
        if (    pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            ||  pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the tracking array: one R0 memory object per chunk. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (    fInflate
            &&  !pMemObj
            &&  pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 chunk into memory before handing its pages to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
612
613
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only the owning session (or driver unload) may tear the balloon down. */
    if (    pDevExt->MemBalloon.pOwner == pSession
        ||  pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate from the top so cChunks always reflects the chunks still held. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
661
662
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* The event spinlock may have been created even if the session one failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            /* The physical address of this request is handed to the host for IRQ acknowledgement. */
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: vboxGuestInitReportGuestInfo failed, rc=%Rrc\n", rc));

            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure path: undo the locks created above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
823
824
825/**
826 * Deletes all the items in a wait chain.
827 * @param pWait The head of the chain.
828 */
829static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
830{
831 while (pList->pHead)
832 {
833 int rc2;
834 PVBOXGUESTWAIT pWait = pList->pHead;
835 pList->pHead = pWait->pNext;
836
837 pWait->pNext = NULL;
838 pWait->pPrev = NULL;
839 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
840 pWait->Event = NIL_RTSEMEVENTMULTI;
841 pWait->pSession = NULL;
842 RTMemFree(pWait);
843 }
844 pList->pHead = NULL;
845 pList->pTail = NULL;
846}
847
848
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
891
892
893/**
894 * Creates a VBoxGuest user session.
895 *
896 * The native code calls this when a ring-3 client opens the device.
897 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
898 *
899 * @returns VBox status code.
900 * @param pDevExt The device extension.
901 * @param ppSession Where to store the session on success.
902 */
903int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
904{
905 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
906 if (RT_UNLIKELY(!pSession))
907 {
908 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
909 return VERR_NO_MEMORY;
910 }
911
912 pSession->Process = RTProcSelf();
913 pSession->R0Process = RTR0ProcHandleSelf();
914 pSession->pDevExt = pDevExt;
915
916 *ppSession = pSession;
917 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
918 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
919 return VINF_SUCCESS;
920}
921
922
923/**
924 * Creates a VBoxGuest kernel session.
925 *
926 * The native code calls this when a ring-0 client connects to the device.
927 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
928 *
929 * @returns VBox status code.
930 * @param pDevExt The device extension.
931 * @param ppSession Where to store the session on success.
932 */
933int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
934{
935 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
936 if (RT_UNLIKELY(!pSession))
937 {
938 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
939 return VERR_NO_MEMORY;
940 }
941
942 pSession->Process = NIL_RTPROCESS;
943 pSession->R0Process = NIL_RTR0PROCESS;
944 pSession->pDevExt = pDevExt;
945
946 *ppSession = pSession;
947 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
948 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
949 return VINF_SUCCESS;
950}
951
952
953
/**
 * Closes a VBoxGuest session.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session to close (and free).
 */
void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    unsigned i; NOREF(i);
    Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
         pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */

#ifdef VBOX_WITH_HGCM
    /* Disconnect every HGCM service the session is still connected to,
       clearing each id before issuing the (possibly blocking) disconnect. */
    for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
        if (pSession->aHGCMClientIds[i])
        {
            VBoxGuestHGCMDisconnectInfo Info;
            Info.result = 0;
            Info.u32ClientID = pSession->aHGCMClientIds[i];
            pSession->aHGCMClientIds[i] = 0;
            Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
            VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
        }
#endif

    pSession->pDevExt = NULL;
    pSession->Process = NIL_RTPROCESS;
    pSession->R0Process = NIL_RTR0PROCESS;
    /* Release any balloon chunks owned by this session before freeing it. */
    vboxGuestCloseMemBalloon(pDevExt, pSession);
    RTMemFree(pSession);
}
985
986
987/**
988 * Links the wait-for-event entry into the tail of the given list.
989 *
990 * @param pList The list to link it into.
991 * @param pWait The wait for event entry to append.
992 */
993DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
994{
995 const PVBOXGUESTWAIT pTail = pList->pTail;
996 pWait->pNext = NULL;
997 pWait->pPrev = pTail;
998 if (pTail)
999 pTail->pNext = pWait;
1000 else
1001 pList->pHead = pWait;
1002 pList->pTail = pWait;
1003}
1004
1005
1006/**
1007 * Unlinks the wait-for-event entry.
1008 *
1009 * @param pList The list to unlink it from.
1010 * @param pWait The wait for event entry to unlink.
1011 */
1012DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
1013{
1014 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
1015 const PVBOXGUESTWAIT pNext = pWait->pNext;
1016 if (pNext)
1017 pNext->pPrev = pPrev;
1018 else
1019 pList->pTail = pPrev;
1020 if (pPrev)
1021 pPrev->pNext = pNext;
1022 else
1023 pList->pHead = pNext;
1024}
1025
1026
/**
 * Allocates a wait-for-event entry.
 *
 * Tries to recycle an entry from the device extension's free list first; only
 * when that is empty does it allocate a new entry and create its event
 * semaphore.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek at the free list to avoid taking the spinlock when the
       list is empty; the tail is re-read under the lock below. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        /* Re-check now that we own the lock; the entry may have been taken. */
        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            /* Rate-limit the release log to the first 32 failures. */
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution (recycled entries carry old state).
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1088
1089
1090/**
1091 * Frees the wait-for-event entry.
1092 * The caller must own the wait spinlock!
1093 *
1094 * @param pDevExt The device extension.
1095 * @param pWait The wait-for-event entry to free.
1096 */
1097static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1098{
1099 pWait->fReqEvents = 0;
1100 pWait->fResEvents = 0;
1101#ifdef VBOX_WITH_HGCM
1102 pWait->pHGCMReq = NULL;
1103#endif
1104 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1105}
1106
1107
1108/**
1109 * Frees the wait-for-event entry.
1110 *
1111 * @param pDevExt The device extension.
1112 * @param pWait The wait-for-event entry to free.
1113 */
1114static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1115{
1116 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1117 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1118 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1119 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1120}
1121
1122
1123/**
1124 * Modifies the guest capabilities.
1125 *
1126 * Should be called during driver init and termination.
1127 *
1128 * @returns VBox status code.
1129 * @param fOr The Or mask (what to enable).
1130 * @param fNot The Not mask (what to disable).
1131 */
1132int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1133{
1134 VMMDevReqGuestCapabilities2 *pReq;
1135 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1136 if (RT_FAILURE(rc))
1137 {
1138 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1139 sizeof(*pReq), sizeof(*pReq), rc));
1140 return rc;
1141 }
1142
1143 pReq->u32OrMask = fOr;
1144 pReq->u32NotMask = fNot;
1145
1146 rc = VbglGRPerform(&pReq->header);
1147 if (RT_FAILURE(rc))
1148 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
1149 else if (RT_FAILURE(pReq->header.rc))
1150 {
1151 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1152 rc = pReq->header.rc;
1153 }
1154
1155 VbglGRFree(&pReq->header);
1156 return rc;
1157}
1158
1159
1160/**
1161 * Implements the fast (no input or output) type of IOCtls.
1162 *
1163 * This is currently just a placeholder stub inherited from the support driver code.
1164 *
1165 * @returns VBox status code.
1166 * @param iFunction The IOCtl function number.
1167 * @param pDevExt The device extension.
1168 * @param pSession The session.
1169 */
1170int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1171{
1172 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1173
1174 NOREF(iFunction);
1175 NOREF(pDevExt);
1176 NOREF(pSession);
1177 return VERR_NOT_SUPPORTED;
1178}
1179
1180
1181/**
1182 * Return the VMM device port.
1183 *
1184 * returns IPRT status code.
1185 * @param pDevExt The device extension.
1186 * @param pInfo The request info.
1187 * @param pcbDataReturned (out) contains the number of bytes to return.
1188 */
1189static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1190{
1191 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1192 pInfo->portAddress = pDevExt->IOPortBase;
1193 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1194 if (pcbDataReturned)
1195 *pcbDataReturned = sizeof(*pInfo);
1196 return VINF_SUCCESS;
1197}
1198
1199
/**
 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
 *
 * Checks whether any of the requested events are pending; if so, consumes
 * them and fills in the caller's result structure.
 *
 * Lock contract: the caller enters the spinlock, we may or may not leave it:
 *   - VINF_SUCCESS: condition met, spinlock RELEASED, caller can return.
 *   - VERR_TIMEOUT: nothing pending, spinlock STILL HELD by the caller.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
 *          VERR_TIMEOUT if the caller should keep waiting.
 * @param   pDevExt     The device extension.
 * @param   pInfo       The wait request; result fields are written on success.
 * @param   iEvent      Bit number of the first requested event (logging only).
 * @param   fReqEvents  The mask of events the caller is waiting for.
 * @param   pTmp        The spinlock temp variable of the held lock.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events, then drop the lock before logging. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        /* Log the event bit number too when only a single event was requested. */
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
1225
1226
/**
 * Handles the WAITEVENT I/O control: waits until one of the requested host
 * events is posted, or until the (optional) timeout expires.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The wait request; result fields are written back.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 * @param   fInterruptible  Whether the wait may be interrupted.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    /* Initialize the output early so every error path returns sane values. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;     /* -1 when the mask is empty */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;      /* the worker released the spinlock on success */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll-only: report timeout right away. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* UINT32_MAX encodes an indefinite wait. */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP. (Note that the wait
     * entry is not unlinked or freed on this path.)
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     * fResEvents == UINT32_MAX marks a cancelled wait (see
     * VBoxGuestCommonIOCtl_CancelAllWaitEvents).
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken with success but no events recorded - should not happen. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1348
1349
1350static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1351{
1352 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1353#if defined(RT_OS_SOLARIS)
1354 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1355#endif
1356 PVBOXGUESTWAIT pWait;
1357 int rc = 0;
1358
1359 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1360
1361 /*
1362 * Walk the event list and wake up anyone with a matching session.
1363 *
1364 * Note! On Solaris we have to do really ugly stuff here because
1365 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1366 * The hack is racy, but what we can we do... (Eliminate this
1367 * termination hack, perhaps?)
1368 */
1369#if defined(RT_OS_SOLARIS)
1370 RTThreadPreemptDisable(&State);
1371 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1372 do
1373 {
1374 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1375 if ( pWait->pSession == pSession
1376 && pWait->fResEvents != UINT32_MAX)
1377 {
1378 RTSEMEVENTMULTI hEvent = pWait->Event;
1379 pWait->fResEvents = UINT32_MAX;
1380 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1381 /* HACK ALRET! This races wakeup + reuse! */
1382 rc |= RTSemEventMultiSignal(hEvent);
1383 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1384 break;
1385 }
1386 } while (pWait);
1387 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1388 RTThreadPreemptDisable(&State);
1389#else
1390 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1391 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1392 if (pWait->pSession == pSession)
1393 {
1394 pWait->fResEvents = UINT32_MAX;
1395 rc |= RTSemEventMultiSignal(pWait->Event);
1396 }
1397 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1398#endif
1399 Assert(rc == 0);
1400
1401 return VINF_SUCCESS;
1402}
1403
1404
/**
 * Handles the VMMREQUEST I/O control: validates a caller-supplied VMMDev
 * request, copies it into a buffer on the physical memory heap, performs it
 * and copies the result back into the caller's buffer.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request to perform (in/out).
 * @param   cbData          Size of the caller's input/output buffer.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
    /* The declared request size must be at least the minimum for its type... */
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    /* ...and must not claim more than the caller's buffer actually holds. */
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    int rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1478
1479
1480static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1481{
1482 VMMDevCtlGuestFilterMask *pReq;
1483 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1484 if (RT_FAILURE(rc))
1485 {
1486 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1487 sizeof(*pReq), sizeof(*pReq), rc));
1488 return rc;
1489 }
1490
1491 pReq->u32OrMask = pInfo->u32OrMask;
1492 pReq->u32NotMask = pInfo->u32NotMask;
1493 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1494 rc = VbglGRPerform(&pReq->header);
1495 if (RT_FAILURE(rc))
1496 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1497 else if (RT_FAILURE(pReq->header.rc))
1498 {
1499 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1500 rc = pReq->header.rc;
1501 }
1502
1503 VbglGRFree(&pReq->header);
1504 return rc;
1505}
1506
1507#ifdef VBOX_WITH_HGCM
1508
1509AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1510
/** Worker for VBoxGuestHGCMAsyncWaitCallback*.
 *
 * Blocks until the host marks the HGCM request done (VBOX_HGCM_REQ_DONE in
 * pHdr->fu32Flags), the timeout expires, or - when fInterruptible - the wait
 * is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header (shared with the ISR,
 *                          hence volatile).
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        Timeout in milliseconds, RT_INDEFINITE_WAIT for none.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        /* No wait entry available: back off briefly and retry. */
        RTThreadSleep(1);
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Driver unloading: bail out immediately, the wait entry is not freed. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1583
1584
/**
 * This is a callback for dealing with async waits (non-interruptible).
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
 *
 * @returns VBox status code (from the wait worker).
 * @param   pHdr        The HGCM request header.
 * @param   pvUser      The device extension (PVBOXGUESTDEVEXT).
 * @param   u32User     The wait timeout in milliseconds.
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                false /* fInterruptible */,
                                                u32User /* cMillies */);
}
1599
1600
/**
 * This is a callback for dealing with interruptible async waits.
 *
 * (Both callbacks take the timeout via u32User; this variant differs from
 * VBoxGuestHGCMAsyncWaitCallback by allowing the wait to be interrupted.)
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
 *
 * @returns VBox status code (from the wait worker).
 * @param   pHdr        The HGCM request header.
 * @param   pvUser      The device extension (PVBOXGUESTDEVEXT).
 * @param   u32User     The wait timeout in milliseconds.
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
                                                                     void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                true /* fInterruptible */,
                                                u32User /* cMillies */ );

}
1617
1618
/**
 * Handles the HGCM_CONNECT I/O control: connects to an HGCM service and
 * records the new client id in the session's client id table.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connect request/result structure.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            /* An id of 0 marks a free slot. */
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: undo the connect so the client isn't leaked. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1670
1671
1672static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1673 size_t *pcbDataReturned)
1674{
1675 /*
1676 * Validate the client id and invalidate its entry while we're in the call.
1677 */
1678 const uint32_t u32ClientId = pInfo->u32ClientID;
1679 unsigned i;
1680 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1681 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1682 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1683 if (pSession->aHGCMClientIds[i] == u32ClientId)
1684 {
1685 pSession->aHGCMClientIds[i] = UINT32_MAX;
1686 break;
1687 }
1688 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1689 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1690 {
1691 static unsigned s_cErrors = 0;
1692 if (s_cErrors++ > 32)
1693 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1694 return VERR_INVALID_HANDLE;
1695 }
1696
1697 /*
1698 * The VbglHGCMConnect call will invoke the callback if the HGCM
1699 * call is performed in an ASYNC fashion. The function is not able
1700 * to deal with cancelled requests.
1701 */
1702 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1703 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1704 if (RT_SUCCESS(rc))
1705 {
1706 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1707 if (pcbDataReturned)
1708 *pcbDataReturned = sizeof(*pInfo);
1709 }
1710
1711 /* Update the client id array according to the result. */
1712 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1713 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1714 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1715 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1716
1717 return rc;
1718}
1719
1720
1721static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1722 PVBOXGUESTSESSION pSession,
1723 VBoxGuestHGCMCallInfo *pInfo,
1724 uint32_t cMillies, bool fInterruptible, bool f32bit,
1725 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1726{
1727 /*
1728 * Some more validations.
1729 */
1730 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1731 {
1732 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1733 return VERR_INVALID_PARAMETER;
1734 }
1735 size_t cbActual = cbExtra + sizeof(*pInfo);
1736#ifdef RT_ARCH_AMD64
1737 if (f32bit)
1738 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1739 else
1740#endif
1741 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1742 if (cbData < cbActual)
1743 {
1744 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1745 cbData, cbActual));
1746 return VERR_INVALID_PARAMETER;
1747 }
1748
1749 /*
1750 * Validate the client id.
1751 */
1752 const uint32_t u32ClientId = pInfo->u32ClientID;
1753 unsigned i;
1754 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1755 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1756 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1757 if (pSession->aHGCMClientIds[i] == u32ClientId)
1758 break;
1759 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1760 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1761 {
1762 static unsigned s_cErrors = 0;
1763 if (s_cErrors++ > 32)
1764 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1765 return VERR_INVALID_HANDLE;
1766 }
1767
1768 /*
1769 * The VbglHGCMCall call will invoke the callback if the HGCM
1770 * call is performed in an ASYNC fashion. This function can
1771 * deal with cancelled requests, so we let user more requests
1772 * be interruptible (should add a flag for this later I guess).
1773 */
1774 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1775 int rc;
1776 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1777#ifdef RT_ARCH_AMD64
1778 if (f32bit)
1779 {
1780 if (fInterruptible)
1781 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1782 else
1783 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1784 }
1785 else
1786#endif
1787 {
1788 if (fInterruptible)
1789 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1790 else
1791 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1792 }
1793 if (RT_SUCCESS(rc))
1794 {
1795 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1796 if (pcbDataReturned)
1797 *pcbDataReturned = cbActual;
1798 }
1799 else
1800 {
1801 if ( rc != VERR_INTERRUPTED
1802 && rc != VERR_TIMEOUT)
1803 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1804 else
1805 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1806 }
1807 return rc;
1808}
1809
1810
1811/**
1812 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1813 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1814 *
1815 * @param pDevExt The device extension.
1816 * @param pu32ClientId The client id.
1817 * @param pcbDataReturned Where to store the amount of returned data. Can
1818 * be NULL.
1819 */
1820static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1821{
1822 int rc;
1823 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1824
1825
1826 /*
1827 * If there is an old client, try disconnect it first.
1828 */
1829 if (pDevExt->u32ClipboardClientId != 0)
1830 {
1831 VBoxGuestHGCMDisconnectInfo Info;
1832 Info.result = VERR_WRONG_ORDER;
1833 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1834 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1835 if (RT_SUCCESS(rc))
1836 {
1837 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1838 return rc;
1839 }
1840 if (RT_FAILURE((int32_t)Info.result))
1841 {
1842 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1843 return Info.result;
1844 }
1845 pDevExt->u32ClipboardClientId = 0;
1846 }
1847
1848 /*
1849 * Try connect.
1850 */
1851 VBoxGuestHGCMConnectInfo Info;
1852 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1853 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1854 Info.u32ClientID = 0;
1855 Info.result = VERR_WRONG_ORDER;
1856
1857 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1858 if (RT_FAILURE(rc))
1859 {
1860 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1861 return rc;
1862 }
1863 if (RT_FAILURE(Info.result))
1864 {
1865 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1866 return rc;
1867 }
1868
1869 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1870
1871 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1872 *pu32ClientId = Info.u32ClientID;
1873 if (pcbDataReturned)
1874 *pcbDataReturned = sizeof(uint32_t);
1875
1876 return VINF_SUCCESS;
1877}
1878
1879#endif /* VBOX_WITH_HGCM */
1880
1881/**
1882 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1883 *
1884 * Ask the host for the size of the balloon and try to set it accordingly. If
1885 * this approach fails because it's not supported, return with fHandleInR3 set
1886 * and let the user land supply memory we can lock via the other ioctl.
1887 *
1888 * @returns VBox status code.
1889 *
1890 * @param pDevExt The device extension.
1891 * @param pSession The session.
1892 * @param pInfo The output buffer.
1893 * @param pcbDataReturned Where to store the amount of returned data. Can
1894 * be NULL.
1895 */
1896static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1897 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
1898{
1899 VMMDevGetMemBalloonChangeRequest *pReq;
1900 int rc;
1901
1902 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
1903 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1904 AssertRCReturn(rc, rc);
1905
1906 /*
1907 * The first user trying to query/change the balloon becomes the
1908 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1909 */
1910 if ( pDevExt->MemBalloon.pOwner != pSession
1911 && pDevExt->MemBalloon.pOwner == NULL)
1912 pDevExt->MemBalloon.pOwner = pSession;
1913
1914 if (pDevExt->MemBalloon.pOwner == pSession)
1915 {
1916 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
1917 if (RT_SUCCESS(rc))
1918 {
1919 /*
1920 * This is a response to that event. Setting this bit means that
1921 * we request the value from the host and change the guest memory
1922 * balloon according to this value.
1923 */
1924 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1925 rc = VbglGRPerform(&pReq->header);
1926 if (RT_SUCCESS(rc))
1927 {
1928 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
1929 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
1930
1931 pInfo->cBalloonChunks = pReq->cBalloonChunks;
1932 pInfo->fHandleInR3 = false;
1933
1934 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
1935 /* Ignore various out of memory failures. */
1936 if ( rc == VERR_NO_MEMORY
1937 || rc == VERR_NO_PHYS_MEMORY
1938 || rc == VERR_NO_CONT_MEMORY)
1939 rc = VINF_SUCCESS;
1940
1941 if (pcbDataReturned)
1942 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
1943 }
1944 else
1945 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
1946 VbglGRFree(&pReq->header);
1947 }
1948 }
1949 else
1950 rc = VERR_PERMISSION_DENIED;
1951
1952 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1953 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
1954 return rc;
1955}
1956
1957
1958/**
1959 * Handle a request for changing the memory balloon.
1960 *
1961 * @returns VBox status code.
1962 *
1963 * @param pDevExt The device extention.
1964 * @param pSession The session.
1965 * @param pInfo The change request structure (input).
1966 * @param pcbDataReturned Where to store the amount of returned data. Can
1967 * be NULL.
1968 */
1969static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1970 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1971{
1972 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1973 AssertRCReturn(rc, rc);
1974
1975 if (!pDevExt->MemBalloon.fUseKernelAPI)
1976 {
1977 /*
1978 * The first user trying to query/change the balloon becomes the
1979 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1980 */
1981 if ( pDevExt->MemBalloon.pOwner != pSession
1982 && pDevExt->MemBalloon.pOwner == NULL)
1983 pDevExt->MemBalloon.pOwner = pSession;
1984
1985 if (pDevExt->MemBalloon.pOwner == pSession)
1986 {
1987 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1988 if (pcbDataReturned)
1989 *pcbDataReturned = 0;
1990 }
1991 else
1992 rc = VERR_PERMISSION_DENIED;
1993 }
1994 else
1995 rc = VERR_PERMISSION_DENIED;
1996
1997 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1998 return rc;
1999}
2000
2001
2002/**
2003 * Guest backdoor logging.
2004 *
2005 * @returns VBox status code.
2006 *
2007 * @param pch The log message (need not be NULL terminated).
2008 * @param cbData Size of the buffer.
2009 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2010 */
2011static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2012{
2013 NOREF(pch);
2014 NOREF(cbData);
2015 Log(("%.*s", cbData, pch));
2016 if (pcbDataReturned)
2017 *pcbDataReturned = 0;
2018 return VINF_SUCCESS;
2019}
2020
2021
2022/**
2023 * Common IOCtl for user to kernel and kernel to kernel communcation.
2024 *
2025 * This function only does the basic validation and then invokes
2026 * worker functions that takes care of each specific function.
2027 *
2028 * @returns VBox status code.
2029 *
2030 * @param iFunction The requested function.
2031 * @param pDevExt The device extension.
2032 * @param pSession The client session.
2033 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2034 * @param cbData The max size of the data buffer.
2035 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2036 */
2037int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2038 void *pvData, size_t cbData, size_t *pcbDataReturned)
2039{
2040 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2041 iFunction, pDevExt, pSession, pvData, cbData));
2042
2043 /*
2044 * Make sure the returned data size is set to zero.
2045 */
2046 if (pcbDataReturned)
2047 *pcbDataReturned = 0;
2048
2049 /*
2050 * Define some helper macros to simplify validation.
2051 */
2052#define CHECKRET_RING0(mnemonic) \
2053 do { \
2054 if (pSession->R0Process != NIL_RTR0PROCESS) \
2055 { \
2056 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2057 pSession->Process, (uintptr_t)pSession->R0Process)); \
2058 return VERR_PERMISSION_DENIED; \
2059 } \
2060 } while (0)
2061#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2062 do { \
2063 if (cbData < (cbMin)) \
2064 { \
2065 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2066 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2067 return VERR_BUFFER_OVERFLOW; \
2068 } \
2069 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2070 { \
2071 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2072 return VERR_INVALID_POINTER; \
2073 } \
2074 } while (0)
2075
2076
2077 /*
2078 * Deal with variably sized requests first.
2079 */
2080 int rc = VINF_SUCCESS;
2081 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2082 {
2083 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2084 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2085 }
2086#ifdef VBOX_WITH_HGCM
2087 /*
2088 * These ones are a bit tricky.
2089 */
2090 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2091 {
2092 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2093 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2094 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2095 fInterruptible, false /*f32bit*/,
2096 0, cbData, pcbDataReturned);
2097 }
2098 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2099 {
2100 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2101 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2102 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2103 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2104 false /*f32bit*/,
2105 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2106 }
2107# ifdef RT_ARCH_AMD64
2108 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2109 {
2110 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2111 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2112 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2113 fInterruptible, true /*f32bit*/,
2114 0, cbData, pcbDataReturned);
2115 }
2116 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2117 {
2118 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2119 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2120 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2121 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2122 true /*f32bit*/,
2123 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2124 }
2125# endif
2126#endif /* VBOX_WITH_HGCM */
2127 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2128 {
2129 CHECKRET_MIN_SIZE("LOG", 1);
2130 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2131 }
2132 else
2133 {
2134 switch (iFunction)
2135 {
2136 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2137 CHECKRET_RING0("GETVMMDEVPORT");
2138 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2139 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2140 break;
2141
2142 case VBOXGUEST_IOCTL_WAITEVENT:
2143 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2144 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2145 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2146 break;
2147
2148 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2149 if (cbData != 0)
2150 rc = VERR_INVALID_PARAMETER;
2151 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2152 break;
2153
2154 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2155 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2156 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2157 break;
2158
2159#ifdef VBOX_WITH_HGCM
2160 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2161# ifdef RT_ARCH_AMD64
2162 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2163# endif
2164 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2165 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2166 break;
2167
2168 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2169# ifdef RT_ARCH_AMD64
2170 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2171# endif
2172 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2173 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2174 break;
2175
2176 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2177 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2178 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2179 break;
2180#endif /* VBOX_WITH_HGCM */
2181
2182 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2183 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2184 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2185 break;
2186
2187 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2188 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2189 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2190 break;
2191
2192 default:
2193 {
2194 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2195 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2196 rc = VERR_NOT_SUPPORTED;
2197 break;
2198 }
2199 }
2200 }
2201
2202 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2203 return rc;
2204}
2205
2206
2207
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;   /* preallocated ack request, filled in by the host */
    int rc = 0;                                             /* accumulates RTSemEventMultiSignal failures (asserted 0 at the end) */
    bool fOurIrq;

    /*
     * Make sure we've initalized the device extension; the ack request is
     * allocated during init, so a NULL here means we're not ready for IRQs.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them.  This works
     *       fine as long as we never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         * Writing the physical address of the ack request to the VMMDev
         * request port makes the host fill in pReq->events and header.rc.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();   /* the host writes pReq behind the compiler's back */
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it for the poll-queue work below (done outside the lock).
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every waiter whose request the host has marked done gets signalled.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.  Each event bit is consumed by
             * the first matching waiter; leftovers are stored as pending.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
2329
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette