VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 31241

Last change on this file since 31241 was 31241, checked in by vboxsync, 14 years ago

Removed use of interface version for IGuest::getAdditionsVersion, some cleanup.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 83.9 KB
Line 
1/* $Id: VBoxGuest.cpp 31241 2010-07-30 12:50:58Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2010 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_DEFAULT
23#include "VBoxGuestInternal.h"
24#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
25#include <VBox/log.h>
26#include <iprt/mem.h>
27#include <iprt/time.h>
28#include <iprt/memobj.h>
29#include <iprt/asm.h>
30#include <iprt/asm-amd64-x86.h>
31#include <iprt/string.h>
32#include <iprt/process.h>
33#include <iprt/assert.h>
34#include <iprt/param.h>
35#ifdef VBOX_WITH_HGCM
36# include <iprt/thread.h>
37#endif
38#include "version-generated.h"
39#if defined(RT_OS_LINUX) || defined(RT_OS_FREEBSD)
40# include "revision-generated.h"
41#endif
42
43
44/*******************************************************************************
45* Internal Functions *
46*******************************************************************************/
47#ifdef VBOX_WITH_HGCM
48static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
49#endif
50
51
/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
/** The size of a VMMDevChangeMemBalloon request including the trailing array
 *  of physical page addresses covering one full balloon chunk. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
56
57
58
59/**
60 * Reserves memory in which the VMM can relocate any guest mappings
61 * that are floating around.
62 *
63 * This operation is a little bit tricky since the VMM might not accept
64 * just any address because of address clashes between the three contexts
65 * it operates in, so use a small stack to perform this operation.
66 *
67 * @returns VBox status code (ignored).
68 * @param pDevExt The device extension.
69 */
70static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
71{
72 /*
73 * Query the required space.
74 */
75 VMMDevReqHypervisorInfo *pReq;
76 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
77 if (RT_FAILURE(rc))
78 return rc;
79 pReq->hypervisorStart = 0;
80 pReq->hypervisorSize = 0;
81 rc = VbglGRPerform(&pReq->header);
82 if (RT_FAILURE(rc)) /* this shouldn't happen! */
83 {
84 VbglGRFree(&pReq->header);
85 return rc;
86 }
87
88 /*
89 * The VMM will report back if there is nothing it wants to map, like for
90 * insance in VT-x and AMD-V mode.
91 */
92 if (pReq->hypervisorSize == 0)
93 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
94 else
95 {
96 /*
97 * We have to try several times since the host can be picky
98 * about certain addresses.
99 */
100 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
101 uint32_t cbHypervisor = pReq->hypervisorSize;
102 RTR0MEMOBJ ahTries[5];
103 uint32_t iTry;
104 bool fBitched = false;
105 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
106 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
107 {
108 /*
109 * Reserve space, or if that isn't supported, create a object for
110 * some fictive physical memory and map that in to kernel space.
111 *
112 * To make the code a bit uglier, most systems cannot help with
113 * 4MB alignment, so we have to deal with that in addition to
114 * having two ways of getting the memory.
115 */
116 uint32_t uAlignment = _4M;
117 RTR0MEMOBJ hObj;
118 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
119 if (rc == VERR_NOT_SUPPORTED)
120 {
121 uAlignment = PAGE_SIZE;
122 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
123 }
124 if (rc == VERR_NOT_SUPPORTED)
125 {
126 if (hFictive == NIL_RTR0MEMOBJ)
127 {
128 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M, RTMEM_CACHE_POLICY_DONT_CARE);
129 if (RT_FAILURE(rc))
130 break;
131 hFictive = hObj;
132 }
133 uAlignment = _4M;
134 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
135 if (rc == VERR_NOT_SUPPORTED)
136 {
137 uAlignment = PAGE_SIZE;
138 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
139 }
140 }
141 if (RT_FAILURE(rc))
142 {
143 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
144 rc, cbHypervisor, uAlignment, iTry));
145 fBitched = true;
146 break;
147 }
148
149 /*
150 * Try set it.
151 */
152 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
153 pReq->header.rc = VERR_INTERNAL_ERROR;
154 pReq->hypervisorSize = cbHypervisor;
155 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
156 if ( uAlignment == PAGE_SIZE
157 && pReq->hypervisorStart & (_4M - 1))
158 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
159 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
160
161 rc = VbglGRPerform(&pReq->header);
162 if (RT_SUCCESS(rc))
163 {
164 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
165 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
166 RTR0MemObjAddress(pDevExt->hGuestMappings),
167 RTR0MemObjSize(pDevExt->hGuestMappings),
168 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
169 break;
170 }
171 ahTries[iTry] = hObj;
172 }
173
174 /*
175 * Cleanup failed attempts.
176 */
177 while (iTry-- > 0)
178 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
179 if ( RT_FAILURE(rc)
180 && hFictive != NIL_RTR0PTR)
181 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
182 if (RT_FAILURE(rc) && !fBitched)
183 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
184 }
185 VbglGRFree(&pReq->header);
186
187 /*
188 * We ignore failed attempts for now.
189 */
190 return VINF_SUCCESS;
191}
192
193
194/**
195 * Undo what vboxGuestInitFixateGuestMappings did.
196 *
197 * @param pDevExt The device extension.
198 */
199static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
200{
201 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
202 {
203 /*
204 * Tell the host that we're going to free the memory we reserved for
205 * it, the free it up. (Leak the memory if anything goes wrong here.)
206 */
207 VMMDevReqHypervisorInfo *pReq;
208 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
209 if (RT_SUCCESS(rc))
210 {
211 pReq->hypervisorStart = 0;
212 pReq->hypervisorSize = 0;
213 rc = VbglGRPerform(&pReq->header);
214 VbglGRFree(&pReq->header);
215 }
216 if (RT_SUCCESS(rc))
217 {
218 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
219 AssertRC(rc);
220 }
221 else
222 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
223
224 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
225 }
226}
227
228
229/**
230 * Sets the interrupt filter mask during initialization and termination.
231 *
232 * This will ASSUME that we're the ones in carge over the mask, so
233 * we'll simply clear all bits we don't set.
234 *
235 * @returns VBox status code (ignored).
236 * @param pDevExt The device extension.
237 * @param fMask The new mask.
238 */
239static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
240{
241 VMMDevCtlGuestFilterMask *pReq;
242 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
243 if (RT_SUCCESS(rc))
244 {
245 pReq->u32OrMask = fMask;
246 pReq->u32NotMask = ~fMask;
247 rc = VbglGRPerform(&pReq->header);
248 if (RT_FAILURE(rc))
249 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc\n", rc));
250 VbglGRFree(&pReq->header);
251 }
252 return rc;
253}
254
255
256/**
257 * Report guest information to the VMMDev.
258 *
259 * @returns VBox status code.
260 * @param pDevExt The device extension.
261 * @param enmOSType The OS type to report.
262 */
263static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
264{
265 /*
266 * Report general info + capabilities to host.
267 */
268 VMMDevReportGuestInfo *pReq;
269 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
270 if (RT_SUCCESS(rc))
271 {
272 pReq->guestInfo.interfaceVersion = VMMDEV_VERSION;
273 pReq->guestInfo.osType = enmOSType;
274 rc = VbglGRPerform(&pReq->header);
275 if (RT_FAILURE(rc))
276 LogRel(("vboxGuestInitReportGuestInfo: 1st part failed with rc=%Rrc\n", rc));
277 VbglGRFree(&pReq->header);
278 }
279 VMMDevReportGuestInfo2 *pReq2;
280 if (RT_SUCCESS(rc))
281 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq2, sizeof(*pReq2), VMMDevReq_ReportGuestInfo2);
282 if (RT_SUCCESS(rc))
283 {
284 pReq2->guestInfo.additionsMajor = VBOX_VERSION_MAJOR;
285 pReq2->guestInfo.additionsMinor = VBOX_VERSION_MINOR;
286 pReq2->guestInfo.additionsBuild = VBOX_VERSION_BUILD;
287 pReq2->guestInfo.additionsRevision = VBOX_SVN_REV;
288 pReq2->guestInfo.additionsFeatures = 0;
289 RTStrCopy(pReq2->guestInfo.szName, sizeof(pReq2->guestInfo.szName), VBOX_VERSION_STRING);
290 rc = VbglGRPerform(&pReq2->header);
291 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
292 rc = VINF_SUCCESS;
293 if (RT_FAILURE(rc))
294 LogRel(("vboxGuestInitReportGuestInfo: 2nd part failed with rc=%Rrc\n", rc));
295 VbglGRFree(&pReq2->header);
296 }
297
298 /*
299 * Report guest status to host. Because the host set the "Guest Additions active" flag as soon
300 * as he received the VMMDevReportGuestInfo above to make sure all is compatible with older Guest
301 * Additions we now have to disable that flag again here (too early, VBoxService and friends need
302 * to start up first).
303 */
304 VMMDevReportGuestStatus *pReq3;
305 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq3, sizeof(*pReq3), VMMDevReq_ReportGuestStatus);
306 if (RT_SUCCESS(rc))
307 {
308 pReq3->guestStatus.facility = VBoxGuestStatusFacility_VBoxGuestDriver;
309 pReq3->guestStatus.status = VBoxGuestStatusCurrent_Active; /** @todo Are we actually *really* active at this point? */
310 pReq3->guestStatus.flags = 0;
311 rc = VbglGRPerform(&pReq3->header);
312 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
313 rc = VINF_SUCCESS;
314 if (RT_FAILURE(rc))
315 LogRel(("vboxGuestInitReportGuestInfo: reporting status failed with rc=%Rrc\n", rc));
316 VbglGRFree(&pReq3->header);
317 }
318 return rc;
319}
320
321
322/**
323 * Inflate the balloon by one chunk represented by an R0 memory object.
324 *
325 * The caller owns the balloon mutex.
326 *
327 * @returns IPRT status code.
328 * @param pMemObj Pointer to the R0 memory object.
329 * @param pReq The pre-allocated request for performing the VMMDev call.
330 */
331static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
332{
333 uint32_t iPage;
334 int rc;
335
336 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
337 {
338 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
339 pReq->aPhysPage[iPage] = phys;
340 }
341
342 /* Protect this memory from being accessed. Doesn't work on every platform and probably
343 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
344 * done when object is freed. */
345 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
346
347 pReq->fInflate = true;
348 pReq->header.size = cbChangeMemBalloonReq;
349 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
350
351 rc = VbglGRPerform(&pReq->header);
352 if (RT_FAILURE(rc))
353 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%Rrc\n", rc));
354 return rc;
355}
356
357
358/**
359 * Deflate the balloon by one chunk - info the host and free the memory object.
360 *
361 * The caller owns the balloon mutex.
362 *
363 * @returns IPRT status code.
364 * @param pMemObj Pointer to the R0 memory object.
365 * The memory object will be freed afterwards.
366 * @param pReq The pre-allocated request for performing the VMMDev call.
367 */
368static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
369{
370 uint32_t iPage;
371 int rc;
372
373 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
374 {
375 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
376 pReq->aPhysPage[iPage] = phys;
377 }
378
379 pReq->fInflate = false;
380 pReq->header.size = cbChangeMemBalloonReq;
381 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
382
383 rc = VbglGRPerform(&pReq->header);
384 if (RT_FAILURE(rc))
385 {
386 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%Rrc\n", rc));
387 return rc;
388 }
389
390 /* undo previous protec call, ignore rc for reasons stated there. */
391 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
392 /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */
393
394 rc = RTR0MemObjFree(*pMemObj, true);
395 if (RT_FAILURE(rc))
396 {
397 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
398 return rc;
399 }
400
401 *pMemObj = NIL_RTR0MEMOBJ;
402 return VINF_SUCCESS;
403}
404
405
406/**
407 * Inflate/deflate the memory balloon and notify the host.
408 *
409 * This is a worker used by VBoxGuestCommonIOCtl_CheckMemoryBalloon - it takes
410 * the mutex.
411 *
412 * @returns VBox status code.
413 * @param pDevExt The device extension.
414 * @param pSession The session.
415 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
416 * @param pfHandleInR3 Where to return the handle-in-ring3 indicator
417 * (VINF_SUCCESS if set).
418 */
419static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, uint32_t cBalloonChunks, uint32_t *pfHandleInR3)
420{
421 int rc = VINF_SUCCESS;
422
423 if (pDevExt->MemBalloon.fUseKernelAPI)
424 {
425 VMMDevChangeMemBalloon *pReq;
426 uint32_t i;
427
428 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
429 {
430 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
431 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
432 return VERR_INVALID_PARAMETER;
433 }
434
435 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
436 return VINF_SUCCESS; /* nothing to do */
437
438 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
439 && !pDevExt->MemBalloon.paMemObj)
440 {
441 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
442 if (!pDevExt->MemBalloon.paMemObj)
443 {
444 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
445 return VERR_NO_MEMORY;
446 }
447 }
448
449 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
450 if (RT_FAILURE(rc))
451 return rc;
452
453 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
454 {
455 /* inflate */
456 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
457 {
458 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
459 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
460 if (RT_FAILURE(rc))
461 {
462 if (rc == VERR_NOT_SUPPORTED)
463 {
464 /* not supported -- fall back to the R3-allocated memory. */
465 rc = VINF_SUCCESS;
466 pDevExt->MemBalloon.fUseKernelAPI = false;
467 Assert(pDevExt->MemBalloon.cChunks == 0);
468 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
469 }
470 /* else if (rc == VERR_NO_MEMORY || rc == VERR_NO_PHYS_MEMORY):
471 * cannot allocate more memory => don't try further, just stop here */
472 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
473 break;
474 }
475
476 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
477 if (RT_FAILURE(rc))
478 {
479 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
480 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
481 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
482 break;
483 }
484 pDevExt->MemBalloon.cChunks++;
485 }
486 }
487 else
488 {
489 /* deflate */
490 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
491 {
492 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
493 if (RT_FAILURE(rc))
494 {
495 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
496 break;
497 }
498 pDevExt->MemBalloon.cChunks--;
499 }
500 }
501
502 VbglGRFree(&pReq->header);
503 }
504
505 /*
506 * Set the handle-in-ring3 indicator. When set Ring-3 will have to work
507 * the balloon changes via the other API.
508 */
509 *pfHandleInR3 = pDevExt->MemBalloon.fUseKernelAPI ? false : true;
510
511 return rc;
512}
513
514
/**
 * Inflate/deflate the balloon by one chunk.
 *
 * Worker for VBoxGuestCommonIOCtl_ChangeMemoryBalloon - it takes the mutex.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (not used by this worker itself;
 *                          ownership is handled by the caller).
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse to grow past the maximum, or if the maximum was never queried. */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        if (!pDevExt->MemBalloon.paMemObj)
        {
            /* Lazily allocate the per-chunk memory object tracking array. */
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * For inflation, also remember the first free slot found on the way.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    /*
     * Try inflate / deflate the balloon as requested.
     */
    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 pages backing the chunk so the host can take them. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
631
632
633/**
634 * Cleanup the memory balloon of a session.
635 *
636 * Will request the balloon mutex, so it must be valid and the caller must not
637 * own it already.
638 *
639 * @param pDevExt The device extension.
640 * @param pDevExt The session. Can be NULL at unload.
641 */
642static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
643{
644 RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
645 if ( pDevExt->MemBalloon.pOwner == pSession
646 || pSession == NULL /*unload*/)
647 {
648 if (pDevExt->MemBalloon.paMemObj)
649 {
650 VMMDevChangeMemBalloon *pReq;
651 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
652 if (RT_SUCCESS(rc))
653 {
654 uint32_t i;
655 for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
656 {
657 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
658 if (RT_FAILURE(rc))
659 {
660 LogRel(("vboxGuestCloseMemBalloon: Deflate failed with rc=%Rrc. Will leak %u chunks.\n",
661 rc, pDevExt->MemBalloon.cChunks));
662 break;
663 }
664 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
665 pDevExt->MemBalloon.cChunks--;
666 }
667 VbglGRFree(&pReq->header);
668 }
669 else
670 LogRel(("vboxGuestCloseMemBalloon: Failed to allocate VMMDev request buffer (rc=%Rrc). Will leak %u chunks.\n",
671 rc, pDevExt->MemBalloon.cChunks));
672 RTMemFree(pDevExt->MemBalloon.paMemObj);
673 pDevExt->MemBalloon.paMemObj = NULL;
674 }
675
676 pDevExt->MemBalloon.pOwner = NULL;
677 }
678 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
679}
680
681
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieve
 * the MMIO and I/O port ranges, this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * On failure everything allocated here is torn down again; the caller only
 * has to clean up what it allocated itself.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->EventSpinlock = NIL_RTSPINLOCK;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32MousePosChangedSeq = 0;
    pDevExt->SessionSpinlock = NIL_RTSPINLOCK;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->MemBalloon.hMtx = NIL_RTSEMFASTMUTEX;
    pDevExt->MemBalloon.cChunks = 0;
    pDevExt->MemBalloon.cMaxChunks = 0;
    pDevExt->MemBalloon.fUseKernelAPI = true;
    pDevExt->MemBalloon.paMemObj = NULL;
    pDevExt->MemBalloon.pOwner = NULL;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks as well as the ballooning mutex.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create spinlock, rc=%Rrc!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    rc = RTSemFastMutexCreate(&pDevExt->MemBalloon.hMtx);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to create mutex, rc=%Rrc!\n", rc));
        RTSpinlockDestroy(pDevExt->SessionSpinlock);
        RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        /* Pre-allocate the IRQ acknowledge request so the ISR never has to allocate. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }

                    LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: vboxGuestInitReportGuestInfo failed, rc=%Rrc\n", rc));

            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    /* Failure: undo the locks created above and return the status code. */
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
842
843
844/**
845 * Deletes all the items in a wait chain.
846 * @param pWait The head of the chain.
847 */
848static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
849{
850 while (pList->pHead)
851 {
852 int rc2;
853 PVBOXGUESTWAIT pWait = pList->pHead;
854 pList->pHead = pWait->pNext;
855
856 pWait->pNext = NULL;
857 pWait->pPrev = NULL;
858 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
859 pWait->Event = NIL_RTSEMEVENTMULTI;
860 pWait->pSession = NULL;
861 RTMemFree(pWait);
862 }
863 pList->pHead = NULL;
864 pList->pTail = NULL;
865}
866
867
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this before the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Clean up the bits that involves the host first.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX); /* clears all capabilities */
    vboxGuestSetFilterMask(pDevExt, 0); /* filter all events */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup all the other resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    rc2 = RTSemFastMutexDestroy(pDevExt->MemBalloon.hMtx); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
910
911
912/**
913 * Creates a VBoxGuest user session.
914 *
915 * The native code calls this when a ring-3 client opens the device.
916 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
917 *
918 * @returns VBox status code.
919 * @param pDevExt The device extension.
920 * @param ppSession Where to store the session on success.
921 */
922int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
923{
924 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
925 if (RT_UNLIKELY(!pSession))
926 {
927 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
928 return VERR_NO_MEMORY;
929 }
930
931 pSession->Process = RTProcSelf();
932 pSession->R0Process = RTR0ProcHandleSelf();
933 pSession->pDevExt = pDevExt;
934
935 *ppSession = pSession;
936 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
937 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
938 return VINF_SUCCESS;
939}
940
941
942/**
943 * Creates a VBoxGuest kernel session.
944 *
945 * The native code calls this when a ring-0 client connects to the device.
946 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
947 *
948 * @returns VBox status code.
949 * @param pDevExt The device extension.
950 * @param ppSession Where to store the session on success.
951 */
952int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
953{
954 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
955 if (RT_UNLIKELY(!pSession))
956 {
957 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
958 return VERR_NO_MEMORY;
959 }
960
961 pSession->Process = NIL_RTPROCESS;
962 pSession->R0Process = NIL_RTR0PROCESS;
963 pSession->pDevExt = pDevExt;
964
965 *ppSession = pSession;
966 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
967 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
968 return VINF_SUCCESS;
969}
970
971
972
973/**
974 * Closes a VBoxGuest session.
975 *
976 * @param pDevExt The device extension.
977 * @param pSession The session to close (and free).
978 */
979void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
980{
981 unsigned i; NOREF(i);
982 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
983 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
984
985#ifdef VBOX_WITH_HGCM
986 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
987 if (pSession->aHGCMClientIds[i])
988 {
989 VBoxGuestHGCMDisconnectInfo Info;
990 Info.result = 0;
991 Info.u32ClientID = pSession->aHGCMClientIds[i];
992 pSession->aHGCMClientIds[i] = 0;
993 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
994 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
995 }
996#endif
997
998 pSession->pDevExt = NULL;
999 pSession->Process = NIL_RTPROCESS;
1000 pSession->R0Process = NIL_RTR0PROCESS;
1001 vboxGuestCloseMemBalloon(pDevExt, pSession);
1002 RTMemFree(pSession);
1003}
1004
1005
1006/**
1007 * Links the wait-for-event entry into the tail of the given list.
1008 *
1009 * @param pList The list to link it into.
1010 * @param pWait The wait for event entry to append.
1011 */
1012DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
1013{
1014 const PVBOXGUESTWAIT pTail = pList->pTail;
1015 pWait->pNext = NULL;
1016 pWait->pPrev = pTail;
1017 if (pTail)
1018 pTail->pNext = pWait;
1019 else
1020 pList->pHead = pWait;
1021 pList->pTail = pWait;
1022}
1023
1024
1025/**
1026 * Unlinks the wait-for-event entry.
1027 *
1028 * @param pList The list to unlink it from.
1029 * @param pWait The wait for event entry to unlink.
1030 */
1031DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
1032{
1033 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
1034 const PVBOXGUESTWAIT pNext = pWait->pNext;
1035 if (pNext)
1036 pNext->pPrev = pPrev;
1037 else
1038 pList->pTail = pPrev;
1039 if (pPrev)
1040 pPrev->pNext = pNext;
1041 else
1042 pList->pHead = pNext;
1043}
1044
1045
/**
 * Allocates a wait-for-event entry.
 *
 * First tries to recycle an entry off the free list (taking the event
 * spinlock); only if none is available a new one is allocated from the heap
 * together with its event semaphore.
 *
 * @returns The wait-for-event entry, or NULL on allocation failure.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session that's allocating this. Can be NULL.
 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     */
    /* Unlocked peek at the free list; rechecked below under the spinlock. */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        static unsigned s_cErrors = 0; /* throttles release-log spamming */
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution (recycled entries may carry stale state).
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1107
1108
1109/**
1110 * Frees the wait-for-event entry.
1111 * The caller must own the wait spinlock!
1112 *
1113 * @param pDevExt The device extension.
1114 * @param pWait The wait-for-event entry to free.
1115 */
1116static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1117{
1118 pWait->fReqEvents = 0;
1119 pWait->fResEvents = 0;
1120#ifdef VBOX_WITH_HGCM
1121 pWait->pHGCMReq = NULL;
1122#endif
1123 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1124}
1125
1126
1127/**
1128 * Frees the wait-for-event entry.
1129 *
1130 * @param pDevExt The device extension.
1131 * @param pWait The wait-for-event entry to free.
1132 */
1133static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1134{
1135 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1136 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1137 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1138 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1139}
1140
1141
1142/**
1143 * Modifies the guest capabilities.
1144 *
1145 * Should be called during driver init and termination.
1146 *
1147 * @returns VBox status code.
1148 * @param fOr The Or mask (what to enable).
1149 * @param fNot The Not mask (what to disable).
1150 */
1151int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1152{
1153 VMMDevReqGuestCapabilities2 *pReq;
1154 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1155 if (RT_FAILURE(rc))
1156 {
1157 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1158 sizeof(*pReq), sizeof(*pReq), rc));
1159 return rc;
1160 }
1161
1162 pReq->u32OrMask = fOr;
1163 pReq->u32NotMask = fNot;
1164
1165 rc = VbglGRPerform(&pReq->header);
1166 if (RT_FAILURE(rc))
1167 Log(("VBoxGuestSetGuestCapabilities: VbglGRPerform failed, rc=%Rrc!\n", rc));
1168
1169 VbglGRFree(&pReq->header);
1170 return rc;
1171}
1172
1173
1174/**
1175 * Implements the fast (no input or output) type of IOCtls.
1176 *
1177 * This is currently just a placeholder stub inherited from the support driver code.
1178 *
1179 * @returns VBox status code.
1180 * @param iFunction The IOCtl function number.
1181 * @param pDevExt The device extension.
1182 * @param pSession The session.
1183 */
1184int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1185{
1186 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1187
1188 NOREF(iFunction);
1189 NOREF(pDevExt);
1190 NOREF(pSession);
1191 return VERR_NOT_SUPPORTED;
1192}
1193
1194
1195/**
1196 * Return the VMM device port.
1197 *
1198 * returns IPRT status code.
1199 * @param pDevExt The device extension.
1200 * @param pInfo The request info.
1201 * @param pcbDataReturned (out) contains the number of bytes to return.
1202 */
1203static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1204{
1205 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1206 pInfo->portAddress = pDevExt->IOPortBase;
1207 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1208 if (pcbDataReturned)
1209 *pcbDataReturned = sizeof(*pInfo);
1210 return VINF_SUCCESS;
1211}
1212
1213
/**
 * Worker for VBoxGuestCommonIOCtl_WaitEvent.
 *
 * Checks whether any of the requested events are pending and, if so, consumes
 * them (atomically clears the bits) and fills in the result info.
 *
 * The caller enters the spinlock, we may or may not leave it: we release the
 * spinlock ONLY on the VINF_SUCCESS path - on VERR_TIMEOUT the caller still
 * owns it.
 *
 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately;
 *          VERR_TIMEOUT if no requested event is pending yet.
 */
DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
                                        int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
{
    uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
    if (fMatches)
    {
        /* Consume the matched events and drop the lock before touching pInfo. */
        ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);

        pInfo->u32EventFlagsOut = fMatches;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        return VINF_SUCCESS;
    }
    return VERR_TIMEOUT;
}
1239
1240
/**
 * Handles VBOXGUEST_IOCTL_WAITEVENT.
 *
 * Waits for one or more of the events in pInfo->u32EventMaskIn to become
 * pending, consuming them on success. A zero timeout acts as a pure poll.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The request/result buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 * @param   fInterruptible  Whether the wait may be interrupted (signals).
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    /* Initialize the output for the error case. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    /* Lowest requested event bit; -1 when the mask is empty (invalid). */
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1;
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;    /* the worker already released the spinlock on this path */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll only; report timeout now that the poll missed. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     * Note: fResEvents == UINT32_MAX is the cancel marker set by
     *       VBoxGuestCommonIOCtl_CancelAllWaitEvents.
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        /* A "successful" wait without any events is an internal inconsistency. */
        if (RT_SUCCESS(rc))
        {
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1362
1363
1364static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1365{
1366 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1367#if defined(RT_OS_SOLARIS)
1368 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1369#endif
1370 PVBOXGUESTWAIT pWait;
1371 int rc = 0;
1372
1373 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1374
1375 /*
1376 * Walk the event list and wake up anyone with a matching session.
1377 *
1378 * Note! On Solaris we have to do really ugly stuff here because
1379 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1380 * The hack is racy, but what we can we do... (Eliminate this
1381 * termination hack, perhaps?)
1382 */
1383#if defined(RT_OS_SOLARIS)
1384 RTThreadPreemptDisable(&State);
1385 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1386 do
1387 {
1388 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1389 if ( pWait->pSession == pSession
1390 && pWait->fResEvents != UINT32_MAX)
1391 {
1392 RTSEMEVENTMULTI hEvent = pWait->Event;
1393 pWait->fResEvents = UINT32_MAX;
1394 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1395 /* HACK ALRET! This races wakeup + reuse! */
1396 rc |= RTSemEventMultiSignal(hEvent);
1397 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1398 break;
1399 }
1400 } while (pWait);
1401 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1402 RTThreadPreemptDisable(&State);
1403#else
1404 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1405 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1406 if (pWait->pSession == pSession)
1407 {
1408 pWait->fResEvents = UINT32_MAX;
1409 rc |= RTSemEventMultiSignal(pWait->Event);
1410 }
1411 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1412#endif
1413 Assert(rc == 0);
1414
1415 return VINF_SUCCESS;
1416}
1417
1418
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST.
 *
 * Validates the caller-supplied VMMDev request, copies it into a block from
 * the physical memory heap, performs it, and copies the result back on
 * success.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The request header (kernel mapping of the caller's buffer).
 * @param   cbData          Size of the buffer pReqHdr points at.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     */
    const VMMDevRequestType enmType = pReqHdr->requestType;
    const uint32_t cbReq = pReqHdr->size;
    const uint32_t cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    int rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc=%Rrc!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the whole (possibly updated) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport succeeded but the VMMDev rejected the request - report its status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1492
1493
1494static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1495{
1496 VMMDevCtlGuestFilterMask *pReq;
1497 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1498 if (RT_FAILURE(rc))
1499 {
1500 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%Rrc!!\n",
1501 sizeof(*pReq), sizeof(*pReq), rc));
1502 return rc;
1503 }
1504
1505 pReq->u32OrMask = pInfo->u32OrMask;
1506 pReq->u32NotMask = pInfo->u32NotMask;
1507 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1508 rc = VbglGRPerform(&pReq->header);
1509 if (RT_FAILURE(rc))
1510 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1511
1512 VbglGRFree(&pReq->header);
1513 return rc;
1514}
1515
1516#ifdef VBOX_WITH_HGCM
1517
1518AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1519
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks until the HGCM request is marked VBOX_HGCM_REQ_DONE by the ISR, the
 * timeout expires, or (when fInterruptible) a signal arrives.
 *
 * @returns VBox status code (VINF_SUCCESS when the request completed).
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        Timeout in milliseconds (RT_INDEFINITE_WAIT for none).
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1);   /* out of memory: back off briefly and retry */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc;    /* driver unload - do not touch any state, bail out ASAP */

    /*
     * Unlink, free and return.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (   !fInterruptible
             || rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1592
1593
1594/**
1595 * This is a callback for dealing with async waits.
1596 *
1597 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1598 */
1599static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1600{
1601 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1602 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1603 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1604 pDevExt,
1605 false /* fInterruptible */,
1606 u32User /* cMillies */);
1607}
1608
1609
/**
 * This is a callback for dealing with interruptible async waits.
 *
 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent, except
 * that the wait may be interrupted by signals (the u32User timeout still
 * applies).
 */
static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
                                                                     void *pvUser, uint32_t u32User)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
    Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
    return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
                                                pDevExt,
                                                true /* fInterruptible */,
                                                u32User /* cMillies */ );

}
1626
1627
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service and records the new client id in the session's
 * client table; if the table is full the connection is rolled back.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connection request/result buffer.
 * @param   pcbDataReturned Where to store the amount of returned data. Can be NULL.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                /* Roll the connection back since we cannot track it. */
                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        /* Copy the info (including a failed pInfo->result) back to the caller. */
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1679
1680
1681static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1682 size_t *pcbDataReturned)
1683{
1684 /*
1685 * Validate the client id and invalidate its entry while we're in the call.
1686 */
1687 const uint32_t u32ClientId = pInfo->u32ClientID;
1688 unsigned i;
1689 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1690 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1691 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1692 if (pSession->aHGCMClientIds[i] == u32ClientId)
1693 {
1694 pSession->aHGCMClientIds[i] = UINT32_MAX;
1695 break;
1696 }
1697 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1698 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1699 {
1700 static unsigned s_cErrors = 0;
1701 if (s_cErrors++ > 32)
1702 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1703 return VERR_INVALID_HANDLE;
1704 }
1705
1706 /*
1707 * The VbglHGCMConnect call will invoke the callback if the HGCM
1708 * call is performed in an ASYNC fashion. The function is not able
1709 * to deal with cancelled requests.
1710 */
1711 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1712 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1713 if (RT_SUCCESS(rc))
1714 {
1715 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1716 if (pcbDataReturned)
1717 *pcbDataReturned = sizeof(*pInfo);
1718 }
1719
1720 /* Update the client id array according to the result. */
1721 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1722 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1723 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1724 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1725
1726 return rc;
1727}
1728
1729
1730static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1731 PVBOXGUESTSESSION pSession,
1732 VBoxGuestHGCMCallInfo *pInfo,
1733 uint32_t cMillies, bool fInterruptible, bool f32bit,
1734 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1735{
1736 /*
1737 * Some more validations.
1738 */
1739 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1740 {
1741 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1742 return VERR_INVALID_PARAMETER;
1743 }
1744 size_t cbActual = cbExtra + sizeof(*pInfo);
1745#ifdef RT_ARCH_AMD64
1746 if (f32bit)
1747 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1748 else
1749#endif
1750 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1751 if (cbData < cbActual)
1752 {
1753 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1754 cbData, cbActual));
1755 return VERR_INVALID_PARAMETER;
1756 }
1757
1758 /*
1759 * Validate the client id.
1760 */
1761 const uint32_t u32ClientId = pInfo->u32ClientID;
1762 unsigned i;
1763 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1764 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1765 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1766 if (pSession->aHGCMClientIds[i] == u32ClientId)
1767 break;
1768 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1769 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1770 {
1771 static unsigned s_cErrors = 0;
1772 if (s_cErrors++ > 32)
1773 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1774 return VERR_INVALID_HANDLE;
1775 }
1776
1777 /*
1778 * The VbglHGCMCall call will invoke the callback if the HGCM
1779 * call is performed in an ASYNC fashion. This function can
1780 * deal with cancelled requests, so we let user more requests
1781 * be interruptible (should add a flag for this later I guess).
1782 */
1783 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1784 int rc;
1785 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1786#ifdef RT_ARCH_AMD64
1787 if (f32bit)
1788 {
1789 if (fInterruptible)
1790 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1791 else
1792 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1793 }
1794 else
1795#endif
1796 {
1797 if (fInterruptible)
1798 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1799 else
1800 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1801 }
1802 if (RT_SUCCESS(rc))
1803 {
1804 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1805 if (pcbDataReturned)
1806 *pcbDataReturned = cbActual;
1807 }
1808 else
1809 {
1810 if ( rc != VERR_INTERRUPTED
1811 && rc != VERR_TIMEOUT)
1812 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1813 else
1814 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1815 }
1816 return rc;
1817}
1818
1819
1820/**
1821 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1822 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1823 *
1824 * @param pDevExt The device extension.
1825 * @param pu32ClientId The client id.
1826 * @param pcbDataReturned Where to store the amount of returned data. Can
1827 * be NULL.
1828 */
1829static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1830{
1831 int rc;
1832 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1833
1834
1835 /*
1836 * If there is an old client, try disconnect it first.
1837 */
1838 if (pDevExt->u32ClipboardClientId != 0)
1839 {
1840 VBoxGuestHGCMDisconnectInfo Info;
1841 Info.result = VERR_WRONG_ORDER;
1842 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1843 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1844 if (RT_SUCCESS(rc))
1845 {
1846 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1847 return rc;
1848 }
1849 if (RT_FAILURE((int32_t)Info.result))
1850 {
1851 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1852 return Info.result;
1853 }
1854 pDevExt->u32ClipboardClientId = 0;
1855 }
1856
1857 /*
1858 * Try connect.
1859 */
1860 VBoxGuestHGCMConnectInfo Info;
1861 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1862 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1863 Info.u32ClientID = 0;
1864 Info.result = VERR_WRONG_ORDER;
1865
1866 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1867 if (RT_FAILURE(rc))
1868 {
1869 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1870 return rc;
1871 }
1872 if (RT_FAILURE(Info.result))
1873 {
1874 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1875 return rc;
1876 }
1877
1878 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1879
1880 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1881 *pu32ClientId = Info.u32ClientID;
1882 if (pcbDataReturned)
1883 *pcbDataReturned = sizeof(uint32_t);
1884
1885 return VINF_SUCCESS;
1886}
1887
1888#endif /* VBOX_WITH_HGCM */
1889
1890/**
1891 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1892 *
1893 * Ask the host for the size of the balloon and try to set it accordingly. If
1894 * this approach fails because it's not supported, return with fHandleInR3 set
1895 * and let the user land supply memory we can lock via the other ioctl.
1896 *
1897 * @returns VBox status code.
1898 *
1899 * @param pDevExt The device extension.
1900 * @param pSession The session.
1901 * @param pInfo The output buffer.
1902 * @param pcbDataReturned Where to store the amount of returned data. Can
1903 * be NULL.
1904 */
static int VBoxGuestCommonIOCtl_CheckMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    int rc;

    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON\n"));
    /* All balloon state (pOwner, cMaxChunks, ...) is protected by this mutex. */
    rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
    AssertRCReturn(rc, rc);

    /*
     * The first user trying to query/change the balloon becomes the
     * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
     */
    if (   pDevExt->MemBalloon.pOwner != pSession
        && pDevExt->MemBalloon.pOwner == NULL)
        pDevExt->MemBalloon.pOwner = pSession;

    if (pDevExt->MemBalloon.pOwner == pSession)
    {
        /* Allocate a guest<->host request buffer and ask the host for the
           desired balloon size. */
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
        if (RT_SUCCESS(rc))
        {
            /*
             * This is a response to that event. Setting this bit means that
             * we request the value from the host and change the guest memory
             * balloon according to this value.
             */
            pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
            rc = VbglGRPerform(&pReq->header);
            if (RT_SUCCESS(rc))
            {
                /* cMaxChunks is fixed for the VM lifetime; it may only transition
                   from 0 (uninitialized) to the host-reported value. */
                Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
                pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

                pInfo->cBalloonChunks = pReq->cBalloonChunks;
                pInfo->fHandleInR3 = false;

                /* Try do the resizing in the kernel; if the kernel API is not
                   available this sets pInfo->fHandleInR3 so user land takes over. */
                rc = vboxGuestSetBalloonSizeKernel(pDevExt, pReq->cBalloonChunks, &pInfo->fHandleInR3);
                /* Ignore various out of memory failures. */
                if (   rc == VERR_NO_MEMORY
                    || rc == VERR_NO_PHYS_MEMORY
                    || rc == VERR_NO_CONT_MEMORY)
                    rc = VINF_SUCCESS;

                if (pcbDataReturned)
                    *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);
            }
            else
                LogRel(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON: VbglGRPerform failed. rc=%Rrc\n", rc));
            VbglGRFree(&pReq->header);
        }
    }
    else
        /* Some other session already owns the balloon. */
        rc = VERR_PERMISSION_DENIED;

    RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
    Log(("VBoxGuestCommonIOCtl: CHECK_MEMORY_BALLOON returns %Rrc\n", rc));
    return rc;
}
1965
1966
1967/**
1968 * Handle a request for changing the memory balloon.
1969 *
1970 * @returns VBox status code.
1971 *
1972 * @param pDevExt The device extention.
1973 * @param pSession The session.
1974 * @param pInfo The change request structure (input).
1975 * @param pcbDataReturned Where to store the amount of returned data. Can
1976 * be NULL.
1977 */
1978static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1979 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1980{
1981 int rc = RTSemFastMutexRequest(pDevExt->MemBalloon.hMtx);
1982 AssertRCReturn(rc, rc);
1983
1984 if (!pDevExt->MemBalloon.fUseKernelAPI)
1985 {
1986 /*
1987 * The first user trying to query/change the balloon becomes the
1988 * owner and owns it until the session is closed (vboxGuestCloseMemBalloon).
1989 */
1990 if ( pDevExt->MemBalloon.pOwner != pSession
1991 && pDevExt->MemBalloon.pOwner == NULL)
1992 pDevExt->MemBalloon.pOwner = pSession;
1993
1994 if (pDevExt->MemBalloon.pOwner == pSession)
1995 {
1996 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1997 if (pcbDataReturned)
1998 *pcbDataReturned = 0;
1999 }
2000 else
2001 rc = VERR_PERMISSION_DENIED;
2002 }
2003 else
2004 rc = VERR_PERMISSION_DENIED;
2005
2006 RTSemFastMutexRelease(pDevExt->MemBalloon.hMtx);
2007 return rc;
2008}
2009
2010
2011/**
2012 * Guest backdoor logging.
2013 *
2014 * @returns VBox status code.
2015 *
2016 * @param pch The log message (need not be NULL terminated).
2017 * @param cbData Size of the buffer.
2018 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2019 */
2020static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
2021{
2022 NOREF(pch);
2023 NOREF(cbData);
2024 Log(("%.*s", cbData, pch));
2025 if (pcbDataReturned)
2026 *pcbDataReturned = 0;
2027 return VINF_SUCCESS;
2028}
2029
2030
2031/**
2032 * Common IOCtl for user to kernel and kernel to kernel communcation.
2033 *
2034 * This function only does the basic validation and then invokes
2035 * worker functions that takes care of each specific function.
2036 *
2037 * @returns VBox status code.
2038 *
2039 * @param iFunction The requested function.
2040 * @param pDevExt The device extension.
2041 * @param pSession The client session.
2042 * @param pvData The input/output data buffer. Can be NULL depending on the function.
2043 * @param cbData The max size of the data buffer.
2044 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
2045 */
2046int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
2047 void *pvData, size_t cbData, size_t *pcbDataReturned)
2048{
2049 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
2050 iFunction, pDevExt, pSession, pvData, cbData));
2051
2052 /*
2053 * Make sure the returned data size is set to zero.
2054 */
2055 if (pcbDataReturned)
2056 *pcbDataReturned = 0;
2057
2058 /*
2059 * Define some helper macros to simplify validation.
2060 */
2061#define CHECKRET_RING0(mnemonic) \
2062 do { \
2063 if (pSession->R0Process != NIL_RTR0PROCESS) \
2064 { \
2065 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
2066 pSession->Process, (uintptr_t)pSession->R0Process)); \
2067 return VERR_PERMISSION_DENIED; \
2068 } \
2069 } while (0)
2070#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
2071 do { \
2072 if (cbData < (cbMin)) \
2073 { \
2074 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2075 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2076 return VERR_BUFFER_OVERFLOW; \
2077 } \
2078 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2079 { \
2080 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2081 return VERR_INVALID_POINTER; \
2082 } \
2083 } while (0)
2084
2085
2086 /*
2087 * Deal with variably sized requests first.
2088 */
2089 int rc = VINF_SUCCESS;
2090 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2091 {
2092 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2093 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2094 }
2095#ifdef VBOX_WITH_HGCM
2096 /*
2097 * These ones are a bit tricky.
2098 */
2099 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2100 {
2101 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2102 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2103 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2104 fInterruptible, false /*f32bit*/,
2105 0, cbData, pcbDataReturned);
2106 }
2107 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2108 {
2109 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2110 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2111 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2112 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2113 false /*f32bit*/,
2114 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2115 }
2116# ifdef RT_ARCH_AMD64
2117 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2118 {
2119 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2120 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2121 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2122 fInterruptible, true /*f32bit*/,
2123 0, cbData, pcbDataReturned);
2124 }
2125 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2126 {
2127 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2128 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2129 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2130 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2131 true /*f32bit*/,
2132 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2133 }
2134# endif
2135#endif /* VBOX_WITH_HGCM */
2136 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2137 {
2138 CHECKRET_MIN_SIZE("LOG", 1);
2139 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2140 }
2141 else
2142 {
2143 switch (iFunction)
2144 {
2145 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2146 CHECKRET_RING0("GETVMMDEVPORT");
2147 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2148 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2149 break;
2150
2151 case VBOXGUEST_IOCTL_WAITEVENT:
2152 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2153 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2154 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2155 break;
2156
2157 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2158 if (cbData != 0)
2159 rc = VERR_INVALID_PARAMETER;
2160 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2161 break;
2162
2163 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2164 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2165 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2166 break;
2167
2168#ifdef VBOX_WITH_HGCM
2169 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2170# ifdef RT_ARCH_AMD64
2171 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2172# endif
2173 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2174 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2175 break;
2176
2177 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2178# ifdef RT_ARCH_AMD64
2179 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2180# endif
2181 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2182 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2183 break;
2184
2185 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2186 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2187 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2188 break;
2189#endif /* VBOX_WITH_HGCM */
2190
2191 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2192 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2193 rc = VBoxGuestCommonIOCtl_CheckMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2194 break;
2195
2196 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2197 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2198 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2199 break;
2200
2201 default:
2202 {
2203 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2204 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2205 rc = VERR_NOT_SUPPORTED;
2206 break;
2207 }
2208 }
2209 }
2210
2211 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2212 return rc;
2213}
2214
2215
2216
2217/**
2218 * Common interrupt service routine.
2219 *
2220 * This deals with events and with waking up thread waiting for those events.
2221 *
2222 * @returns true if it was our interrupt, false if it wasn't.
2223 * @param pDevExt The VBoxGuest device extension.
2224 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    /* Preallocated acknowledge request shared with the host; its physical
       address (PhysIrqAckEvents) is written to the VMMDev request port below. */
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0;
    bool fOurIrq;

    /*
     * Make sure we've initalized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     *       so we're entering the spinlock without disabling them.  This works
     *       fine as long as we never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowlegde events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        /* Barriers: make sure the request fields are written before the port
           write triggers the host, and not reordered by the compiler. */
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        /* The host fills in header.rc and events synchronously during the port write. */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for.
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                /* Wake every waiter whose HGCM request the host has completed. */
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if (    !pWait->fResEvents
                        &&  (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.
             */
            /* Merge in events left pending from earlier interrupts, hand each
               waiter the subset it asked for, and stash whatever remains. */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if (    (pWait->fReqEvents & fEvents)
                    &&  !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is serious wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%Rrc (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implements that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        /* Bump the sequence number so pollers can detect the change, then let
           the OS-specific code wake anyone polling the mouse device. */
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    /* All RTSemEventMultiSignal calls are expected to succeed. */
    Assert(rc == 0);
    return fOurIrq;
}
2338
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette