VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 29535

Last change on this file since 29535 was 29250, checked in by vboxsync, 15 years ago

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 83.5 KB
Line 
1/* $Id: VBoxGuest.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
25#include <VBox/log.h>
26#include <iprt/mem.h>
27#include <iprt/time.h>
28#include <iprt/memobj.h>
29#include <iprt/asm.h>
30#include <iprt/asm-amd64-x86.h>
31#include <iprt/string.h>
32#include <iprt/process.h>
33#include <iprt/assert.h>
34#include <iprt/param.h>
35#ifdef VBOX_WITH_HGCM
36# include <iprt/thread.h>
37#endif
38#include "version-generated.h"
39#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
40# include "revision-generated.h"
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
52/*******************************************************************************
53* Global Variables *
54*******************************************************************************/
55static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
56
57
58
59/**
60 * Reserves memory in which the VMM can relocate any guest mappings
61 * that are floating around.
62 *
63 * This operation is a little bit tricky since the VMM might not accept
64 * just any address because of address clashes between the three contexts
65 * it operates in, so use a small stack to perform this operation.
66 *
67 * @returns VBox status code (ignored).
68 * @param pDevExt The device extension.
69 */
70static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
71{
72 /*
73 * Query the required space.
74 */
75 VMMDevReqHypervisorInfo *pReq;
76 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
77 if (RT_FAILURE(rc))
78 return rc;
79 pReq->hypervisorStart = 0;
80 pReq->hypervisorSize = 0;
81 rc = VbglGRPerform(&pReq->header);
82 if (RT_FAILURE(rc)) /* this shouldn't happen! */
83 {
84 VbglGRFree(&pReq->header);
85 return rc;
86 }
87
88 /*
89 * The VMM will report back if there is nothing it wants to map, like for
90 * insance in VT-x and AMD-V mode.
91 */
92 if (pReq->hypervisorSize == 0)
93 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
94 else
95 {
96 /*
97 * We have to try several times since the host can be picky
98 * about certain addresses.
99 */
100 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
101 uint32_t cbHypervisor = pReq->hypervisorSize;
102 RTR0MEMOBJ ahTries[5];
103 uint32_t iTry;
104 bool fBitched = false;
105 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
106 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
107 {
108 /*
109 * Reserve space, or if that isn't supported, create a object for
110 * some fictive physical memory and map that in to kernel space.
111 *
112 * To make the code a bit uglier, most systems cannot help with
113 * 4MB alignment, so we have to deal with that in addition to
114 * having two ways of getting the memory.
115 */
116 uint32_t uAlignment = _4M;
117 RTR0MEMOBJ hObj;
118 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
119 if (rc == VERR_NOT_SUPPORTED)
120 {
121 uAlignment = PAGE_SIZE;
122 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
123 }
124 if (rc == VERR_NOT_SUPPORTED)
125 {
126 if (hFictive == NIL_RTR0MEMOBJ)
127 {
128 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
129 if (RT_FAILURE(rc))
130 break;
131 hFictive = hObj;
132 }
133 uAlignment = _4M;
134 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
135 if (rc == VERR_NOT_SUPPORTED)
136 {
137 uAlignment = PAGE_SIZE;
138 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
139 }
140 }
141 if (RT_FAILURE(rc))
142 {
143 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
144 rc, cbHypervisor, uAlignment, iTry));
145 fBitched = true;
146 break;
147 }
148
149 /*
150 * Try set it.
151 */
152 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
153 pReq->header.rc = VERR_INTERNAL_ERROR;
154 pReq->hypervisorSize = cbHypervisor;
155 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
156 if ( uAlignment == PAGE_SIZE
157 && pReq->hypervisorStart & (_4M - 1))
158 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
159 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
160
161 rc = VbglGRPerform(&pReq->header);
162 if (RT_SUCCESS(rc))
163 {
164 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
165 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
166 RTR0MemObjAddress(pDevExt->hGuestMappings),
167 RTR0MemObjSize(pDevExt->hGuestMappings),
168 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
169 break;
170 }
171 ahTries[iTry] = hObj;
172 }
173
174 /*
175 * Cleanup failed attempts.
176 */
177 while (iTry-- > 0)
178 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
179 if ( RT_FAILURE(rc)
180 && hFictive != NIL_RTR0PTR)
181 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
182 if (RT_FAILURE(rc) && !fBitched)
183 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
184 }
185 VbglGRFree(&pReq->header);
186
187 /*
188 * We ignore failed attempts for now.
189 */
190 return VINF_SUCCESS;
191}
192
193
194/**
195 * Undo what vboxGuestInitFixateGuestMappings did.
196 *
197 * @param pDevExt The device extension.
198 */
199static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
200{
201 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
202 {
203 /*
204 * Tell the host that we're going to free the memory we reserved for
205 * it, the free it up. (Leak the memory if anything goes wrong here.)
206 */
207 VMMDevReqHypervisorInfo *pReq;
208 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
209 if (RT_SUCCESS(rc))
210 {
211 pReq->hypervisorStart = 0;
212 pReq->hypervisorSize = 0;
213 rc = VbglGRPerform(&pReq->header);
214 VbglGRFree(&pReq->header);
215 }
216 if (RT_SUCCESS(rc))
217 {
218 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
219 AssertRC(rc);
220 }
221 else
222 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
223
224 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
225 }
226}
227
228
229/**
230 * Sets the interrupt filter mask during initialization and termination.
231 *
232 * This will ASSUME that we're the ones in carge over the mask, so
233 * we'll simply clear all bits we don't set.
234 *
235 * @returns VBox status code (ignored).
236 * @param pDevExt The device extension.
237 * @param fMask The new mask.
238 */
239static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
240{
241 VMMDevCtlGuestFilterMask *pReq;
242 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
243 if (RT_SUCCESS(rc))
244 {
245 pReq->u32OrMask = fMask;
246 pReq->u32NotMask = ~fMask;
247 rc = VbglGRPerform(&pReq->header);
248 if ( RT_FAILURE(rc)
249 || RT_FAILURE(pReq->header.rc))
250 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
251 rc, pReq->header.rc));
252 VbglGRFree(&pReq->header);
253 }
254 return rc;
255}
256
257
258/**
259 * Report guest information to the VMMDev.
260 *
261 * @returns VBox status code.
262 * @param pDevExt The device extension.
263 * @param enmOSType The OS type to report.
264 */
265static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
266{
267 VMMDevReportGuestInfo *pReq;
268 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
269 if (RT_SUCCESS(rc))
270 {
271 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
272 pReq->guestInfo.osType = enmOSType;
273 rc = VbglGRPerform(&pReq->header);
274 if ( RT_FAILURE(rc)
275 || RT_FAILURE(pReq->header.rc))
276 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
277 rc, pReq->header.rc));
278 VbglGRFree(&pReq->header);
279 }
280 VMMDevReportGuestInfo2 *pReq2;
281 if (RT_SUCCESS(rc))
282 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestInfo2);
283 if (RT_SUCCESS(rc))
284 {
285 pReq2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
286 pReq2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
287 pReq2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
288 pReq2->guestInfo.additionsRevision = VBOX_SVN_REV;
289 pReq2->guestInfo.additionsFeatures = 0;
290 RTStrCopy(pReq2->guestInfo.szName, sizeof(pReq2->guestInfo.szName), VBOX_VERSION_STRING);
291 rc = VbglGRPerform(&pReq2->header);
292 if (rc == VERR_NOT_IMPLEMENTED) /* compatibility with older hosts */
293 rc = VINF_SUCCESS;
294 if ( RT_FAILURE(rc)
295 || RT_FAILURE(pReq2->header.rc))
296 LogRel(("vboxGuestInitReportGuestInfo2: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
297 rc, pReq2->header.rc));
298 VbglGRFree(&pReq2->header);
299 }
300 return rc;
301}
302
303
304/**
305 * Inflate the balloon by one chunk represented by an R0 memory object.
306 *
307 * The caller owns the balloon mutex.
308 *
309 * @returns IPRT status code.
310 * @param pMemObj Pointer to the R0 memory object.
311 * @param pReq The pre-allocated request for performing the VMMDev call.
312 */
313static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
314{
315 uint32_t iPage;
316 int rc;
317
318 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
319 {
320 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
321 pReq->aPhysPage[iPage] = phys;
322 }
323
324 /* Protect this memory from being accessed. Doesn't work on every platform and probably
325 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
326 * done when object is freed. */
327 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
328
329 pReq->fInflate = true;
330 pReq->header.size = cbChangeMemBalloonReq;
331 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
332
333 rc = VbglGRPerform(&pReq->header);
334 if (RT_FAILURE(rc))
335 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
336 return rc;
337}
338
339
340/**
341 * Deflate the balloon by one chunk - info the host and free the memory object.
342 *
343 * The caller owns the balloon mutex.
344 *
345 * @returns IPRT status code.
346 * @param pMemObj Pointer to the R0 memory object.
347 * The memory object will be freed afterwards.
348 * @param pReq The pre-allocated request for performing the VMMDev call.
349 */
350static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
351{
352 uint32_t iPage;
353 int rc;
354
355 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
356 {
357 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
358 pReq->aPhysPage[iPage] = phys;
359 }
360
361 pReq->fInflate = false;
362 pReq->header.size = cbChangeMemBalloonReq;
363 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
364
365 rc = VbglGRPerform(&pReq->header);
366 if (RT_FAILURE(rc))
367 {
368 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
369 return rc;
370 }
371
372 /* undo previous protec call, ignore rc for reasons stated there. */
373 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
374 /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */
375
376 rc = RTR0MemObjFree(*pMemObj, true);
377 if (RT_FAILURE(rc))
378 {
379 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
380 return rc;
381 }
382
383 *pMemObj = NIL_RTR0MEMOBJ;
384 return VINF_SUCCESS;
385}
386
387
388/**
389 * Inflate/deflate the memory balloon and notify the host.
390 *
391 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
392 * the mutex.
393 *
394 * @returns VBox status code.
395 * @param pDevExt The device extension.
396 * @param pSession The session.
397 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
398 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
399 * (VINF_SUCCESS if set).
400 */
401static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
402{
403 int rc = VINF_SUCCESS;
404
405 if (pDevExt->MemBalloon.fUseKernelAPI)
406 {
407 VMMDevChangeMemBalloon *pReq;
408 uint32_t i;
409
410 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
411 {
412 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
413 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
414 return VERR_INVALID_PARAMETER;
415 }
416
417 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
418 return VINF_SUCCESS; /* nothing to do */
419
420 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
421 && !pDevExt->MemBalloon.paMemObj)
422 {
423 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
424 if (!pDevExt->MemBalloon.paMemObj)
425 {
426 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
427 return VERR_NO_MEMORY;
428 }
429 }
430
431 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
432 if (RT_FAILURE(rc))
433 return rc;
434
435 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
436 {
437 /* inflate */
438 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
439 {
440 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
441 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
442 if (RT_FAILURE(rc))
443 {
444 if (rc == VERR_NOT_SUPPORTED)
445 {
446 /* not supported -- fall back to the R3-allocated memory. */
447 rc = VINF_SUCCESS;
448 pDevExt->MemBalloon.fUseKernelAPI = false;
449 Assert(pDevExt->MemBalloon.cChunks == 0);
450 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
451 }
452 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
453 * cannot allocate more memory => don't try further, just stop here */
454 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
455 break;
456 }
457
458 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
459 if (RT_FAILURE(rc))
460 {
461 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
462 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
463 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
464 break;
465 }
466 pDevExt->MemBalloon.cChunks++;
467 }
468 }
469 else
470 {
471 /* deflate */
472 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
473 {
474 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
475 if (RT_FAILURE(rc))
476 {
477 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
478 break;
479 }
480 pDevExt->MemBalloon.cChunks--;
481 }
482 }
483
484 VbglGRFree(&pReq->header);
485 }
486
487 /*
488 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
489 * the balloon changes via the other API.
490 */
491 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
492
493 return rc;
494}
495
496
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (currently unused here).
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Reject inflation beyond the host-reported maximum, and inflation
           before the maximum has been queried at all (cMaxChunks == 0). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the array tracking one R0 memory object per chunk. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Cannot deflate an empty balloon. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * While at it, pick up the first free slot for a new inflation.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Pin the ring-3 pages backing the chunk before handing them to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
613
614
/**
 * Cleanup the memory balloon of a session.
 *
 * Will request the balloon mutex, so it must be valid and the caller must not
 * own it already.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL at unload.
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    /* Only tear the balloon down if it's owned by this session, or when
       called without a session at driver unload time. */
    if (   pDevExt->MemBalloon.pOwner == pSession
        || pSession == NULL /*unload*/)
    {
        if (pDevExt->MemBalloon.paMemObj)
        {
            VMMDevChangeMemBalloon *pReq;
            int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
            if (RT_SUCCESS(rc))
            {
                uint32_t i;
                /* Deflate chunk by chunk, last chunk first. */
                for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
                {
                    rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                    if (RT_FAILURE(rc))
                    {
                        LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
                                rc, pDevExt->MemBalloon.cChunks));
                        break;
                    }
                    pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                    pDevExt->MemBalloon.cChunks--;
                }
                VbglGRFree(&pReq->header);
            }
            else
                LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
                        rc, pDevExt->MemBalloon.cChunks));
            RTMemFree(pDevExt->MemBalloon.paMemObj);
            pDevExt->MemBalloon.paMemObj = NULL;
        }

        pDevExt->MemBalloon.pOwner = NULL;
    }
    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
}
662
663
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (   pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            && pVMMDev->u32Size >= 32
            && pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        /* EventSpinlock was pre-initialized to NIL above, so this check tells
           which of the two creations failed. */
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledgement request so the ISR never has
           to allocate; its physical address is handed to the device. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: vboxGuestInitReportGuestInfo failed, rc=%Rrc\n", rc));

            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: unwind the lock/mutex creations done above. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
824
825
826/**
827 * Deletes all the items in a wait chain.
828 * @param pWait The head of the chain.
829 */
830static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
831{
832 while (pList->pHead)
833 {
834 int rc2;
835 PVBOXGUESTWAIT pWait = pList->pHead;
836 pList->pHead = pWait->pNext;
837
838 pWait->pNext = NULL;
839 pWait->pPrev = NULL;
840 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
841 pWait->Event = NIL_RTSEMEVENTMULTI;
842 pWait->pSession = NULL;
843 RTMemFree(pWait);
844 }
845 pList->pHead = NULL;
846 pList->pTail = NULL;
847}
848
849
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this when the driver is being unloaded,
 * but not during system shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
892
893
894/**
895 * Creates a VBoxGuest user session.
896 *
897 * The native code calls this when a ring-3 client opens the device.
898 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
899 *
900 * @returns VBox status code.
901 * @param pDevExt The device extension.
902 * @param ppSession Where to store the session on success.
903 */
904int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
905{
906 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
907 if (RT_UNLIKELY(!pSession))
908 {
909 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
910 return VERR_NO_MEMORY;
911 }
912
913 pSession->Process = RTProcSelf();
914 pSession->R0Process = RTR0ProcHandleSelf();
915 pSession->pDevExt = pDevExt;
916
917 *ppSession = pSession;
918 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
919 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
920 return VINF_SUCCESS;
921}
922
923
924/**
925 * Creates a VBoxGuest kernel session.
926 *
927 * The native code calls this when a ring-0 client connects to the device.
928 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
929 *
930 * @returns VBox status code.
931 * @param pDevExt The device extension.
932 * @param ppSession Where to store the session on success.
933 */
934int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
935{
936 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
937 if (RT_UNLIKELY(!pSession))
938 {
939 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
940 return VERR_NO_MEMORY;
941 }
942
943 pSession->Process = NIL_RTPROCESS;
944 pSession->R0Process = NIL_RTR0PROCESS;
945 pSession->pDevExt = pDevExt;
946
947 *ppSession = pSession;
948 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
949 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
950 return VINF_SUCCESS;
951}
952
953
954
955/**
956 * Closes a VBoxGuest session.
957 *
958 * @param pDevExt The device extension.
959 * @param pSession The session to close (and free).
960 */
961void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
962{
963 unsigned i; NOREF(i);
964 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
965 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
966
967#ifdef VBOX_WITH_HGCM
968 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
969 if (pSession->aHGCMClientIds[i])
970 {
971 VBoxGuestHGCMDisconnectInfo Info;
972 Info.result = 0;
973 Info.u32ClientID = pSession->aHGCMClientIds[i];
974 pSession->aHGCMClientIds[i] = 0;
975 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
976 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
977 }
978#endif
979
980 pSession->pDevExt = NULL;
981 pSession->Process = NIL_RTPROCESS;
982 pSession->R0Process = NIL_RTR0PROCESS;
983 vboxGuestCloseMemBalloon(pDevExt, pSession);
984 RTMemFree(pSession);
985}
986
987
988/**
989 * Links the wait-for-event entry into the tail of the given list.
990 *
991 * @param pList The list to link it into.
992 * @param pWait The wait for event entry to append.
993 */
994DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
995{
996 const PVBOXGUESTWAIT pTail = pList->pTail;
997 pWait->pNext = NULL;
998 pWait->pPrev = pTail;
999 if (pTail)
1000 pTail->pNext = pWait;
1001 else
1002 pList->pHead = pWait;
1003 pList->pTail = pWait;
1004}
1005
1006
1007/**
1008 * Unlinks the wait-for-event entry.
1009 *
1010 * @param pList The list to unlink it from.
1011 * @param pWait The wait for event entry to unlink.
1012 */
1013DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
1014{
1015 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
1016 const PVBOXGUESTWAIT pNext = pWait->pNext;
1017 if (pNext)
1018 pNext->pPrev = pPrev;
1019 else
1020 pList->pTail = pPrev;
1021 if (pPrev)
1022 pPrev->pNext = pNext;
1023 else
1024 pList->pHead = pNext;
1025}
1026
1027
1028/**
1029 * Allocates a wiat-for-event entry.
1030 *
1031 * @returns The wait-for-event entry.
1032 * @param pDevExt The device extension.
1033 * @param pSession The session that's allocating this. Can be NULL.
1034 */
1035static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1036{
1037 /*
1038 * Allocate it one way or the other.
1039 */
1040 PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
1041 if (pWait)
1042 {
1043 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1044 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1045
1046 pWait = pDevExt->FreeList.pTail;
1047 if (pWait)
1048 VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);
1049
1050 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1051 }
1052 if (!pWait)
1053 {
1054 static unsigned s_cErrors = 0;
1055 int rc;
1056
1057 pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
1058 if (!pWait)
1059 {
1060 if (s_cErrors++ < 32)
1061 LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
1062 return NULL;
1063 }
1064
1065 rc = RTSemEventMultiCreate(&pWait->Event);
1066 if (RT_FAILURE(rc))
1067 {
1068 if (s_cErrors++ < 32)
1069 LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
1070 RTMemFree(pWait);
1071 return NULL;
1072 }
1073 }
1074
1075 /*
1076 * Zero members just as an precaution.
1077 */
1078 pWait->pNext = NULL;
1079 pWait->pPrev = NULL;
1080 pWait->fReqEvents = 0;
1081 pWait->fResEvents = 0;
1082 pWait->pSession = pSession;
1083#ifdef VBOX_WITH_HGCM
1084 pWait->pHGCMReq = NULL;
1085#endif
1086 RTSemEventMultiReset(pWait->Event);
1087 return pWait;
1088}
1089
1090
1091/**
1092 * Frees the wait-for-event entry.
1093 * The caller must own the wait spinlock!
1094 *
1095 * @param pDevExt The device extension.
1096 * @param pWait The wait-for-event entry to free.
1097 */
1098static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1099{
1100 pWait->fReqEvents = 0;
1101 pWait->fResEvents = 0;
1102#ifdef VBOX_WITH_HGCM
1103 pWait->pHGCMReq = NULL;
1104#endif
1105 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1106}
1107
1108
1109/**
1110 * Frees the wait-for-event entry.
1111 *
1112 * @param pDevExt The device extension.
1113 * @param pWait The wait-for-event entry to free.
1114 */
1115static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1116{
1117 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1118 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1119 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1120 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1121}
1122
1123
1124/**
1125 * Modifies the guest capabilities.
1126 *
1127 * Should be called during driver init and termination.
1128 *
1129 * @returns VBox status code.
1130 * @param fOr The Or mask (what to enable).
1131 * @param fNot The Not mask (what to disable).
1132 */
1133int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1134{
1135 VMMDevReqGuestCapabilities2 *pReq;
1136 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1137 if (RT_FAILURE(rc))
1138 {
1139 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1140 sizeof(*pReq), sizeof(*pReq), rc));
1141 return rc;
1142 }
1143
1144 pReq->u32OrMask = fOr;
1145 pReq->u32NotMask = fNot;
1146
1147 rc = VbglGRPerform(&pReq->header);
1148 if (RT_FAILURE(rc))
1149 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
1150 else if (RT_FAILURE(pReq->header.rc))
1151 {
1152 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1153 rc = pReq->header.rc;
1154 }
1155
1156 VbglGRFree(&pReq->header);
1157 return rc;
1158}
1159
1160
1161/**
1162 * Implements the fast (no input or output) type of IOCtls.
1163 *
1164 * This is currently just a placeholder stub inherited from the support driver code.
1165 *
1166 * @returns VBox status code.
1167 * @param iFunction The IOCtl function number.
1168 * @param pDevExt The device extension.
1169 * @param pSession The session.
1170 */
1171int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1172{
1173 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1174
1175 NOREF(iFunction);
1176 NOREF(pDevExt);
1177 NOREF(pSession);
1178 return VERR_NOT_SUPPORTED;
1179}
1180
1181
1182/**
1183 * Return the VMM device port.
1184 *
1185 * returns IPRT status code.
1186 * @param pDevExt The device extension.
1187 * @param pInfo The request info.
1188 * @param pcbDataReturned (out) contains the number of bytes to return.
1189 */
1190static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1191{
1192 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1193 pInfo->portAddress = pDevExt->IOPortBase;
1194 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1195 if (pcbDataReturned)
1196 *pcbDataReturned = sizeof(*pInfo);
1197 return VINF_SUCCESS;
1198}
1199
1200
1201/**
1202 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1203 * The caller enters the spinlock, we may or may not leave it.
1204 *
1205 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1206 */
1207DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1208 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
1209{
1210 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1211 if (fMatches)
1212 {
1213 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1214 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
1215
1216 pInfo->u32EventFlagsOut = fMatches;
1217 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1218 if (fReqEvents & ~((uint32_t)1 << iEvent))
1219 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1220 else
1221 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1222 return VINF_SUCCESS;
1223 }
1224 return VERR_TIMEOUT;
1225}
1226
1227
/**
 * Handles the WAITEVENT ioctl: waits for one or more of the requested VMMDev
 * events to become pending, subject to a timeout.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The wait request; the outcome is returned here too.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    /* Initialize the output early so every exit path returns something sane. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc; /* the worker already released the spinlock */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll-only: report the timeout right away. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* UINT32_MAX means wait forever (assumed equal to RT_INDEFINITE_WAIT, see AssertCompile below). */
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     *
     * Note: fResEvents == UINT32_MAX is the cancellation marker set by
     * VBoxGuestCommonIOCtl_CancelAllWaitEvents.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken up successfully but no event recorded - should not happen. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1349
1350
1351static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1352{
1353 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1354#if defined(RT_OS_SOLARIS)
1355 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1356#endif
1357 PVBOXGUESTWAIT pWait;
1358 int rc = 0;
1359
1360 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1361
1362 /*
1363 * Walk the event list and wake up anyone with a matching session.
1364 *
1365 * Note! On Solaris we have to do really ugly stuff here because
1366 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1367 * The hack is racy, but what we can we do... (Eliminate this
1368 * termination hack, perhaps?)
1369 */
1370#if defined(RT_OS_SOLARIS)
1371 RTThreadPreemptDisable(&State);
1372 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1373 do
1374 {
1375 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1376 if ( pWait->pSession == pSession
1377 && pWait->fResEvents != UINT32_MAX)
1378 {
1379 RTSEMEVENTMULTI hEvent = pWait->Event;
1380 pWait->fResEvents = UINT32_MAX;
1381 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1382 /* HACK ALRET! This races wakeup + reuse! */
1383 rc |= RTSemEventMultiSignal(hEvent);
1384 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1385 break;
1386 }
1387 } while (pWait);
1388 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1389 RTThreadPreemptDisable(&State);
1390#else
1391 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1392 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1393 if (pWait->pSession == pSession)
1394 {
1395 pWait->fResEvents = UINT32_MAX;
1396 rc |= RTSemEventMultiSignal(pWait->Event);
1397 }
1398 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1399#endif
1400 Assert(rc == 0);
1401
1402 return VINF_SUCCESS;
1403}
1404
1405
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST: forwards a raw VMMDev request from the
 * caller to the host and copies the response back into the caller's buffer.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request header (input and output buffer).
 * @param   cbData          The size of the buffer pReqHdr points to.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    int rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the host's response back into the caller's buffer. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the host rejected the request. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1479
1480
1481static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1482{
1483 VMMDevCtlGuestFilterMask *pReq;
1484 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1485 if (RT_FAILURE(rc))
1486 {
1487 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1488 sizeof(*pReq), sizeof(*pReq), rc));
1489 return rc;
1490 }
1491
1492 pReq->u32OrMask = pInfo->u32OrMask;
1493 pReq->u32NotMask = pInfo->u32NotMask;
1494 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1495 rc = VbglGRPerform(&pReq->header);
1496 if (RT_FAILURE(rc))
1497 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1498 else if (RT_FAILURE(pReq->header.rc))
1499 {
1500 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1501 rc = pReq->header.rc;
1502 }
1503
1504 VbglGRFree(&pReq->header);
1505 return rc;
1506}
1507
1508#ifdef VBOX_WITH_HGCM
1509
AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* the HGCM code below stores RT_INDEFINITE_WAIT in uint32_t cMillies parameters */
1511
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks until the host flags the HGCM request as done (VBOX_HGCM_REQ_DONE)
 * or the wait times out / is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header being waited on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted.
 * @param   cMillies        The wait timeout in milliseconds.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* brief back-off before retrying the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    /* Semaphore destroyed on driver unload - bail without touching pWait. */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1584
1585
1586/**
1587 * This is a callback for dealing with async waits.
1588 *
1589 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1590 */
1591static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1592{
1593 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1594 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1595 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1596 pDevExt,
1597 false /* fInterruptible */,
1598 u32User /* cMillies */);
1599}
1600
1601
1602/**
1603 * This is a callback for dealing with async waits with a timeout.
1604 *
1605 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1606 */
1607static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1608 void *pvUser, uint32_t u32User)
1609{
1610 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1611 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1612 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1613 pDevExt,
1614 true /* fInterruptible */,
1615 u32User /* cMillies */ );
1616
1617}
1618
1619
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT: connects to an HGCM service on the
 * host and registers the resulting client id with the session.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connect request / result.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* Table full: undo the connect so the host side doesn't leak a client. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1671
1672
1673static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1674 size_t *pcbDataReturned)
1675{
1676 /*
1677 * Validate the client id and invalidate its entry while we're in the call.
1678 */
1679 const uint32_t u32ClientId = pInfo->u32ClientID;
1680 unsigned i;
1681 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1682 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1683 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1684 if (pSession->aHGCMClientIds[i] == u32ClientId)
1685 {
1686 pSession->aHGCMClientIds[i] = UINT32_MAX;
1687 break;
1688 }
1689 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1690 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1691 {
1692 static unsigned s_cErrors = 0;
1693 if (s_cErrors++ > 32)
1694 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1695 return VERR_INVALID_HANDLE;
1696 }
1697
1698 /*
1699 * The VbglHGCMConnect call will invoke the callback if the HGCM
1700 * call is performed in an ASYNC fashion. The function is not able
1701 * to deal with cancelled requests.
1702 */
1703 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1704 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1705 if (RT_SUCCESS(rc))
1706 {
1707 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1708 if (pcbDataReturned)
1709 *pcbDataReturned = sizeof(*pInfo);
1710 }
1711
1712 /* Update the client id array according to the result. */
1713 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1714 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1715 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1716 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1717
1718 return rc;
1719}
1720
1721
1722static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1723 PVBOXGUESTSESSION pSession,
1724 VBoxGuestHGCMCallInfo *pInfo,
1725 uint32_t cMillies, bool fInterruptible, bool f32bit,
1726 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1727{
1728 /*
1729 * Some more validations.
1730 */
1731 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1732 {
1733 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1734 return VERR_INVALID_PARAMETER;
1735 }
1736 size_t cbActual = cbExtra + sizeof(*pInfo);
1737#ifdef RT_ARCH_AMD64
1738 if (f32bit)
1739 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1740 else
1741#endif
1742 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1743 if (cbData < cbActual)
1744 {
1745 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1746 cbData, cbActual));
1747 return VERR_INVALID_PARAMETER;
1748 }
1749
1750 /*
1751 * Validate the client id.
1752 */
1753 const uint32_t u32ClientId = pInfo->u32ClientID;
1754 unsigned i;
1755 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1756 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1757 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1758 if (pSession->aHGCMClientIds[i] == u32ClientId)
1759 break;
1760 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1761 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1762 {
1763 static unsigned s_cErrors = 0;
1764 if (s_cErrors++ > 32)
1765 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1766 return VERR_INVALID_HANDLE;
1767 }
1768
1769 /*
1770 * The VbglHGCMCall call will invoke the callback if the HGCM
1771 * call is performed in an ASYNC fashion. This function can
1772 * deal with cancelled requests, so we let user more requests
1773 * be interruptible (should add a flag for this later I guess).
1774 */
1775 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1776 int rc;
1777 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1778#ifdef RT_ARCH_AMD64
1779 if (f32bit)
1780 {
1781 if (fInterruptible)
1782 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1783 else
1784 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1785 }
1786 else
1787#endif
1788 {
1789 if (fInterruptible)
1790 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1791 else
1792 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1793 }
1794 if (RT_SUCCESS(rc))
1795 {
1796 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1797 if (pcbDataReturned)
1798 *pcbDataReturned = cbActual;
1799 }
1800 else
1801 {
1802 if ( rc != VERR_INTERRUPTED
1803 && rc != VERR_TIMEOUT)
1804 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1805 else
1806 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1807 }
1808 return rc;
1809}
1810
1811
1812/**
1813 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1814 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1815 *
1816 * @param pDevExt The device extension.
1817 * @param pu32ClientId The client id.
1818 * @param pcbDataReturned Where to store the amount of returned data. Can
1819 * be NULL.
1820 */
1821static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1822{
1823 int rc;
1824 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1825
1826
1827 /*
1828 * If there is an old client, try disconnect it first.
1829 */
1830 if (pDevExt->u32ClipboardClientId != 0)
1831 {
1832 VBoxGuestHGCMDisconnectInfo Info;
1833 Info.result = VERR_WRONG_ORDER;
1834 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1835 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1836 if (RT_SUCCESS(rc))
1837 {
1838 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1839 return rc;
1840 }
1841 if (RT_FAILURE((int32_t)Info.result))
1842 {
1843 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1844 return Info.result;
1845 }
1846 pDevExt->u32ClipboardClientId = 0;
1847 }
1848
1849 /*
1850 * Try connect.
1851 */
1852 VBoxGuestHGCMConnectInfo Info;
1853 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1854 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1855 Info.u32ClientID = 0;
1856 Info.result = VERR_WRONG_ORDER;
1857
1858 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1859 if (RT_FAILURE(rc))
1860 {
1861 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1862 return rc;
1863 }
1864 if (RT_FAILURE(Info.result))
1865 {
1866 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1867 return rc;
1868 }
1869
1870 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1871
1872 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1873 *pu32ClientId = Info.u32ClientID;
1874 if (pcbDataReturned)
1875 *pcbDataReturned = sizeof(uint32_t);
1876
1877 return VINF_SUCCESS;
1878}
1879
1880#endif /* VBOX_WITH_HGCM */
1881
1882/**
1883 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1884 *
1885 * Ask the host for the size of the balloon and try to set it accordingly. If
1886 * this approach fails because it's not supported, return with fHandleInR3 set
1887 * and let the user land supply memory we can lock via the other ioctl.
1888 *
1889 * @returns VBox status code.
1890 *
1891 * @param pDevExt The device extension.
1892 * @param pSession The session.
1893 * @param pInfo The output buffer.
1894 * @param pcbDataReturned Where to store the amount of returned data. Can
1895 * be NULL.
1896 */
1897static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1898 VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
1899{
1900 VMMDevGetMemBalloonChangeRequest *pReq;
1901 int rc;
1902
1903 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
1904 rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1905 AssertRCReturn(rc, rc);
1906
1907 /*
1908 * The first user trying to query/change the balloon becomes the
1909 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1910 */
1911 if ( pDevExt->MemBalloon.pOwner != pSession
1912 && pDevExt->MemBalloon.pOwner == NULL)
1913 pDevExt->MemBalloon.pOwner = pSession;
1914
1915 if (pDevExt->MemBalloon.pOwner == pSession)
1916 {
1917 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
1918 if (RT_SUCCESS(rc))
1919 {
1920 /*
1921 * This is a response to that event. Setting this bit means that
1922 * we request the value from the host and change the guest memory
1923 * balloon according to this value.
1924 */
1925 pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
1926 rc = VbglGRPerform(&pReq->header);
1927 if (RT_SUCCESS(rc))
1928 {
1929 Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
1930 pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;
1931
1932 pInfo->cBalloonChunks = pReq->cBalloonChunks;
1933 pInfo->fHandleInR3 = false;
1934
1935 rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
1936 /* Ignore various out of memory failures. */
1937 if ( rc == VERR_NO_MEMORY
1938 || rc == VERR_NO_PHYS_MEMORY
1939 || rc == VERR_NO_CONT_MEMORY)
1940 rc = VINF_SUCCESS;
1941
1942 if (pcbDataReturned)
1943 *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
1944 }
1945 else
1946 LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
1947 VbglGRFree(&pReq->header);
1948 }
1949 }
1950 else
1951 rc = VERR_PERMISSION_DENIED;
1952
1953 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1954 Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
1955 return rc;
1956}
1957
1958
1959/**
1960 * Handle a request for changing the memory balloon.
1961 *
1962 * @returns VBox status code.
1963 *
1964 * @param pDevExt The device extention.
1965 * @param pSession The session.
1966 * @param pInfo The change request structure (input).
1967 * @param pcbDataReturned Where to store the amount of returned data. Can
1968 * be NULL.
1969 */
1970static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1971 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1972{
1973 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1974 AssertRCReturn(rc, rc);
1975
1976 if (!pDevExt->MemBalloon.fUseKernelAPI)
1977 {
1978 /*
1979 * The first user trying to query/change the balloon becomes the
1980 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1981 */
1982 if ( pDevExt->MemBalloon.pOwner != pSession
1983 && pDevExt->MemBalloon.pOwner == NULL)
1984 pDevExt->MemBalloon.pOwner = pSession;
1985
1986 if (pDevExt->MemBalloon.pOwner == pSession)
1987 {
1988 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1989 if (pcbDataReturned)
1990 *pcbDataReturned = 0;
1991 }
1992 else
1993 rc = VERR_PERMISSION_DENIED;
1994 }
1995 else
1996 rc = VERR_PERMISSION_DENIED;
1997
1998 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
1999 return rc;
2000}
2001
2002
2003/**
2004 * Guest backdoor logging.
2005 *
2006 * @returns VBox status code.
2007 *
2008 * @param pch The log message (need not be NULL terminated).
2009 * @param cbData Size of the buffer.
2010 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2011 */
2012static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2013{
2014 NOREF(pch);
2015 NOREF(cbData);
2016 Log(("%.*s", cbData, pch));
2017 if (pcbDataReturned)
2018 *pcbDataReturned = 0;
2019 return VINF_SUCCESS;
2020}
2021
2022
2023/**
2024 * Common IOCtl for user to kernel and kernel to kernel communcation.
2025 *
2026 * This function only does the basic validation and then invokes
2027 * worker functions that takes care of each specific function.
2028 *
2029 * @returns VBox status code.
2030 *
2031 * @param iFunction The requested function.
2032 * @param pDevExt The device extension.
2033 * @param pSession The client session.
2034 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2035 * @param cbData The max size of the data buffer.
2036 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2037 */
2038int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2039 void *pvData, size_t cbData, size_t *pcbDataReturned)
2040{
2041 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2042 iFunction, pDevExt, pSession, pvData, cbData));
2043
2044 /*
2045 * Make sure the returned data size is set to zero.
2046 */
2047 if (pcbDataReturned)
2048 *pcbDataReturned = 0;
2049
2050 /*
2051 * Define some helper macros to simplify validation.
2052 */
2053#define CHECKRET_RING0(mnemonic) \
2054 do { \
2055 if (pSession->R0Process != NIL_RTR0PROCESS) \
2056 { \
2057 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2058 pSession->Process, (uintptr_t)pSession->R0Process)); \
2059 return VERR_PERMISSION_DENIED; \
2060 } \
2061 } while (0)
2062#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2063 do { \
2064 if (cbData < (cbMin)) \
2065 { \
2066 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2067 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2068 return VERR_BUFFER_OVERFLOW; \
2069 } \
2070 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2071 { \
2072 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2073 return VERR_INVALID_POINTER; \
2074 } \
2075 } while (0)
2076
2077
2078 /*
2079 * Deal with variably sized requests first.
2080 */
2081 int rc = VINF_SUCCESS;
2082 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2083 {
2084 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2085 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2086 }
2087#ifdef VBOX_WITH_HGCM
2088 /*
2089 * These ones are a bit tricky.
2090 */
2091 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2092 {
2093 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2094 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2095 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2096 fInterruptible, false /*f32bit*/,
2097 0, cbData, pcbDataReturned);
2098 }
2099 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2100 {
2101 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2102 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2103 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2104 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2105 false /*f32bit*/,
2106 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2107 }
2108# ifdef RT_ARCH_AMD64
2109 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2110 {
2111 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2112 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2113 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2114 fInterruptible, true /*f32bit*/,
2115 0, cbData, pcbDataReturned);
2116 }
2117 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2118 {
2119 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2120 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2121 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2122 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2123 true /*f32bit*/,
2124 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2125 }
2126# endif
2127#endif /* VBOX_WITH_HGCM */
2128 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2129 {
2130 CHECKRET_MIN_SIZE("LOG", 1);
2131 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2132 }
2133 else
2134 {
2135 switch (iFunction)
2136 {
2137 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2138 CHECKRET_RING0("GETVMMDEVPORT");
2139 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2140 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2141 break;
2142
2143 case VBOXGUEST_IOCTL_WAITEVENT:
2144 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2145 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2146 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2147 break;
2148
2149 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2150 if (cbData != 0)
2151 rc = VERR_INVALID_PARAMETER;
2152 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2153 break;
2154
2155 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2156 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2157 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2158 break;
2159
2160#ifdef VBOX_WITH_HGCM
2161 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2162# ifdef RT_ARCH_AMD64
2163 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2164# endif
2165 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2166 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2167 break;
2168
2169 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2170# ifdef RT_ARCH_AMD64
2171 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2172# endif
2173 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2174 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2175 break;
2176
2177 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2178 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2179 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2180 break;
2181#endif /* VBOX_WITH_HGCM */
2182
2183 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2184 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2185 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2186 break;
2187
2188 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2189 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2190 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2191 break;
2192
2193 default:
2194 {
2195 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2196 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2197 rc = VERR_NOT_SUPPORTED;
2198 break;
2199 }
2200 }
2201 }
2202
2203 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2204 return rc;
2205}
2206
2207
2208
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up thread waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    /* Preallocated acknowledgment request shared with the host (see PhysIrqAckEvents below). */
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension; a NULL ack request
     * means initialization hasn't completed, so this cannot be our IRQ.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them. This works
     *       fine as long as we never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         * Instead we write the physical address of the ack request directly
         * to the VMMDev request port; the host fills in header.rc and the
         * pending events mask.  The barriers keep the compiler from
         * reordering around the port write.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier();   /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             * Remember it and handle outside the spinlock (see below).
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             * Every completed (VBOX_HGCM_REQ_DONE) waiter that hasn't been
             * signalled yet gets woken up, not just the first one.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.  Each waiter consumes the events
             * it requested; whatever no waiter claimed is stored back into
             * f32PendingEvents for later pickup.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
2330
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette