VirtualBox

source: vbox/trunk/src/VBox/Additions/common/VBoxGuest/VBoxGuest.cpp@ 27655

Last change on this file since 27655 was 27118, checked in by vboxsync, 15 years ago

common/VBoxGuest: fixed the ballooning todos

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 80.3 KB
Line 
1/* $Id: VBoxGuest.cpp 27118 2010-03-05 17:41:52Z vboxsync $ */
2/** @file
3 * VBoxGuest - Guest Additions Driver, Common Code.
4 */
5
6/*
7 * Copyright (C) 2007-2009 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/*******************************************************************************
24* Header Files *
25*******************************************************************************/
26#define LOG_GROUP LOG_GROUP_DEFAULT
27#include "VBoxGuestInternal.h"
28#include <VBox/VMMDev.h> /* for VMMDEV_RAM_SIZE */
29#include <VBox/log.h>
30#include <iprt/mem.h>
31#include <iprt/time.h>
32#include <iprt/memobj.h>
33#include <iprt/asm.h>
34#include <iprt/string.h>
35#include <iprt/process.h>
36#include <iprt/assert.h>
37#include <iprt/param.h>
38#ifdef VBOX_WITH_HGCM
39# include <iprt/thread.h>
40#endif
41
42
43/*******************************************************************************
44* Internal Functions *
45*******************************************************************************/
46#ifdef VBOX_WITH_HGCM
47static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdrNonVolatile, void *pvUser, uint32_t u32User);
48#endif
49
50
51/*******************************************************************************
52* Global Variables *
53*******************************************************************************/
/** Size of a VMMDevChangeMemBalloon request with a fully populated page array
 *  (VMMDEV_MEMORY_BALLOON_CHUNK_PAGES entries) — used for allocation and for
 *  the header.size field of every balloon change request. */
static const size_t cbChangeMemBalloonReq = RT_OFFSETOF(VMMDevChangeMemBalloon, aPhysPage[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES]);
55
56
57
58/**
59 * Reserves memory in which the VMM can relocate any guest mappings
60 * that are floating around.
61 *
62 * This operation is a little bit tricky since the VMM might not accept
63 * just any address because of address clashes between the three contexts
64 * it operates in, so use a small stack to perform this operation.
65 *
66 * @returns VBox status code (ignored).
67 * @param pDevExt The device extension.
68 */
69static int vboxGuestInitFixateGuestMappings(PVBOXGUESTDEVEXT pDevExt)
70{
71 /*
72 * Query the required space.
73 */
74 VMMDevReqHypervisorInfo *pReq;
75 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_GetHypervisorInfo);
76 if (RT_FAILURE(rc))
77 return rc;
78 pReq->hypervisorStart = 0;
79 pReq->hypervisorSize = 0;
80 rc = VbglGRPerform(&pReq->header);
81 if (RT_FAILURE(rc)) /* this shouldn't happen! */
82 {
83 VbglGRFree(&pReq->header);
84 return rc;
85 }
86
87 /*
88 * The VMM will report back if there is nothing it wants to map, like for
89 * insance in VT-x and AMD-V mode.
90 */
91 if (pReq->hypervisorSize == 0)
92 Log(("vboxGuestInitFixateGuestMappings: nothing to do\n"));
93 else
94 {
95 /*
96 * We have to try several times since the host can be picky
97 * about certain addresses.
98 */
99 RTR0MEMOBJ hFictive = NIL_RTR0MEMOBJ;
100 uint32_t cbHypervisor = pReq->hypervisorSize;
101 RTR0MEMOBJ ahTries[5];
102 uint32_t iTry;
103 bool fBitched = false;
104 Log(("vboxGuestInitFixateGuestMappings: cbHypervisor=%#x\n", cbHypervisor));
105 for (iTry = 0; iTry < RT_ELEMENTS(ahTries); iTry++)
106 {
107 /*
108 * Reserve space, or if that isn't supported, create a object for
109 * some fictive physical memory and map that in to kernel space.
110 *
111 * To make the code a bit uglier, most systems cannot help with
112 * 4MB alignment, so we have to deal with that in addition to
113 * having two ways of getting the memory.
114 */
115 uint32_t uAlignment = _4M;
116 RTR0MEMOBJ hObj;
117 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M), uAlignment);
118 if (rc == VERR_NOT_SUPPORTED)
119 {
120 uAlignment = PAGE_SIZE;
121 rc = RTR0MemObjReserveKernel(&hObj, (void *)-1, RT_ALIGN_32(cbHypervisor, _4M) + _4M, uAlignment);
122 }
123 if (rc == VERR_NOT_SUPPORTED)
124 {
125 if (hFictive == NIL_RTR0MEMOBJ)
126 {
127 rc = RTR0MemObjEnterPhys(&hObj, VBOXGUEST_HYPERVISOR_PHYSICAL_START, cbHypervisor + _4M);
128 if (RT_FAILURE(rc))
129 break;
130 hFictive = hObj;
131 }
132 uAlignment = _4M;
133 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
134 if (rc == VERR_NOT_SUPPORTED)
135 {
136 uAlignment = PAGE_SIZE;
137 rc = RTR0MemObjMapKernel(&hObj, hFictive, (void *)-1, uAlignment, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
138 }
139 }
140 if (RT_FAILURE(rc))
141 {
142 LogRel(("VBoxGuest: Failed to reserve memory for the hypervisor: rc=%Rrc (cbHypervisor=%#x uAlignment=%#x iTry=%u)\n",
143 rc, cbHypervisor, uAlignment, iTry));
144 fBitched = true;
145 break;
146 }
147
148 /*
149 * Try set it.
150 */
151 pReq->header.requestType = VMMDevReq_SetHypervisorInfo;
152 pReq->header.rc = VERR_INTERNAL_ERROR;
153 pReq->hypervisorSize = cbHypervisor;
154 pReq->hypervisorStart = (uintptr_t)RTR0MemObjAddress(hObj);
155 if ( uAlignment == PAGE_SIZE
156 && pReq->hypervisorStart & (_4M - 1))
157 pReq->hypervisorStart = RT_ALIGN_32(pReq->hypervisorStart, _4M);
158 AssertMsg(RT_ALIGN_32(pReq->hypervisorStart, _4M) == pReq->hypervisorStart, ("%#x\n", pReq->hypervisorStart));
159
160 rc = VbglGRPerform(&pReq->header);
161 if (RT_SUCCESS(rc))
162 {
163 pDevExt->hGuestMappings = hFictive != NIL_RTR0MEMOBJ ? hFictive : hObj;
164 Log(("VBoxGuest: %p LB %#x; uAlignment=%#x iTry=%u hGuestMappings=%p (%s)\n",
165 RTR0MemObjAddress(pDevExt->hGuestMappings),
166 RTR0MemObjSize(pDevExt->hGuestMappings),
167 uAlignment, iTry, pDevExt->hGuestMappings, hFictive != NIL_RTR0PTR ? "fictive" : "reservation"));
168 break;
169 }
170 ahTries[iTry] = hObj;
171 }
172
173 /*
174 * Cleanup failed attempts.
175 */
176 while (iTry-- > 0)
177 RTR0MemObjFree(ahTries[iTry], false /* fFreeMappings */);
178 if ( RT_FAILURE(rc)
179 && hFictive != NIL_RTR0PTR)
180 RTR0MemObjFree(hFictive, false /* fFreeMappings */);
181 if (RT_FAILURE(rc) && !fBitched)
182 LogRel(("VBoxGuest: Warning: failed to reserve %#d of memory for guest mappings.\n", cbHypervisor));
183 }
184 VbglGRFree(&pReq->header);
185
186 /*
187 * We ignore failed attempts for now.
188 */
189 return VINF_SUCCESS;
190}
191
192
193/**
194 * Undo what vboxGuestInitFixateGuestMappings did.
195 *
196 * @param pDevExt The device extension.
197 */
198static void vboxGuestTermUnfixGuestMappings(PVBOXGUESTDEVEXT pDevExt)
199{
200 if (pDevExt->hGuestMappings != NIL_RTR0PTR)
201 {
202 /*
203 * Tell the host that we're going to free the memory we reserved for
204 * it, the free it up. (Leak the memory if anything goes wrong here.)
205 */
206 VMMDevReqHypervisorInfo *pReq;
207 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevReqHypervisorInfo), VMMDevReq_SetHypervisorInfo);
208 if (RT_SUCCESS(rc))
209 {
210 pReq->hypervisorStart = 0;
211 pReq->hypervisorSize = 0;
212 rc = VbglGRPerform(&pReq->header);
213 VbglGRFree(&pReq->header);
214 }
215 if (RT_SUCCESS(rc))
216 {
217 rc = RTR0MemObjFree(pDevExt->hGuestMappings, true /* fFreeMappings */);
218 AssertRC(rc);
219 }
220 else
221 LogRel(("vboxGuestTermUnfixGuestMappings: Failed to unfix the guest mappings! rc=%Rrc\n", rc));
222
223 pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
224 }
225}
226
227
228/**
229 * Sets the interrupt filter mask during initialization and termination.
230 *
231 * This will ASSUME that we're the ones in carge over the mask, so
232 * we'll simply clear all bits we don't set.
233 *
234 * @returns VBox status code (ignored).
235 * @param pDevExt The device extension.
236 * @param fMask The new mask.
237 */
238static int vboxGuestSetFilterMask(PVBOXGUESTDEVEXT pDevExt, uint32_t fMask)
239{
240 VMMDevCtlGuestFilterMask *pReq;
241 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
242 if (RT_SUCCESS(rc))
243 {
244 pReq->u32OrMask = fMask;
245 pReq->u32NotMask = ~fMask;
246 rc = VbglGRPerform(&pReq->header);
247 if ( RT_FAILURE(rc)
248 || RT_FAILURE(pReq->header.rc))
249 LogRel(("vboxGuestSetFilterMask: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
250 rc, pReq->header.rc));
251 VbglGRFree(&pReq->header);
252 }
253 return rc;
254}
255
256
257/**
258 * Report guest information to the VMMDev.
259 *
260 * @returns VBox status code.
261 * @param pDevExt The device extension.
262 * @param enmOSType The OS type to report.
263 */
264static int vboxGuestInitReportGuestInfo(PVBOXGUESTDEVEXT pDevExt, VBOXOSTYPE enmOSType)
265{
266 VMMDevReportGuestInfo *pReq;
267 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_ReportGuestInfo);
268 if (RT_SUCCESS(rc))
269 {
270 pReq->guestInfo.additionsVersion = VMMDEV_VERSION;
271 pReq->guestInfo.osType = enmOSType;
272 rc = VbglGRPerform(&pReq->header);
273 if ( RT_FAILURE(rc)
274 || RT_FAILURE(pReq->header.rc))
275 LogRel(("vboxGuestInitReportGuestInfo: failed with rc=%Rrc and VMMDev rc=%Rrc\n",
276 rc, pReq->header.rc));
277 VbglGRFree(&pReq->header);
278 }
279 return rc;
280}
281
282
283/**
284 * Inflate the balloon by one chunk represented by an R0 memory object.
285 *
286 * @returns IPRT status code.
287 * @param pMemObj Pointer to the R0 memory object.
288 * @param pReq The pre-allocated request for performing the VMMDev call.
289 */
290static int vboxGuestBalloonInflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
291{
292 uint32_t iPage;
293 int rc;
294
295 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
296 {
297 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
298 pReq->aPhysPage[iPage] = phys;
299 }
300
301 /* Protect this memory from being accessed. Doesn't work on every platform and probably
302 * doesn't work for R3-provided memory, therefore ignore the return value. Unprotect
303 * done when object is freed. */
304 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_NONE);
305
306 pReq->fInflate = true;
307 pReq->header.size = cbChangeMemBalloonReq;
308 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
309
310 rc = VbglGRPerform(&pReq->header);
311 if (RT_FAILURE(rc))
312 LogRel(("vboxGuestBalloonInflate: VbglGRPerform failed. rc=%d\n", rc));
313 return rc;
314}
315
316
317/**
318 * Deflate the balloon by one chunk - info the host and free the memory object.
319 *
320 * @returns IPRT status code.
321 * @param pMemObj Pointer to the R0 memory object.
322 * The memory object will be freed afterwards.
323 * @param pReq The pre-allocated request for performing the VMMDev call.
324 */
325static int vboxGuestBalloonDeflate(PRTR0MEMOBJ pMemObj, VMMDevChangeMemBalloon *pReq)
326{
327 uint32_t iPage;
328 int rc;
329
330 for (iPage = 0; iPage < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; iPage++)
331 {
332 RTHCPHYS phys = RTR0MemObjGetPagePhysAddr(*pMemObj, iPage);
333 pReq->aPhysPage[iPage] = phys;
334 }
335
336 pReq->fInflate = false;
337 pReq->header.size = cbChangeMemBalloonReq;
338 pReq->cPages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
339
340 rc = VbglGRPerform(&pReq->header);
341 if (RT_FAILURE(rc))
342 {
343 LogRel(("vboxGuestBalloonDeflate: VbglGRPerform failed. rc=%d\n", rc));
344 return rc;
345 }
346
347 /* undo previous protec call, ignore rc for reasons stated there. */
348 RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE);
349 /*RTR0MemObjProtect(*pMemObj, 0, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC); - probably not safe... */
350
351 rc = RTR0MemObjFree(*pMemObj, true);
352 if (RT_FAILURE(rc))
353 {
354 LogRel(("vboxGuestBalloonDeflate: RTR0MemObjFree(%p,true) -> %Rrc; this is *BAD*!\n", *pMemObj, rc));
355 return rc;
356 }
357
358 *pMemObj = NIL_RTR0MEMOBJ;
359 return VINF_SUCCESS;
360}
361
362
363/**
364 * Inflate/deflate the memory balloon and notify the host.
365 *
366 * @returns VBox status code.
367 * @param pDevExt The device extension.
368 * @param pSession The session.
369 * @param cBalloonChunks The new size of the balloon in chunks of 1MB.
370 */
371static int vboxGuestSetBalloonSizeKernel(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, uint32_t cBalloonChunks)
372{
373 int rc = VINF_SUCCESS;
374
375 if (pDevExt->MemBalloon.fUseKernelAPI)
376 {
377 VMMDevChangeMemBalloon *pReq;
378 uint32_t i;
379
380 if (cBalloonChunks > pDevExt->MemBalloon.cMaxChunks)
381 {
382 LogRel(("vboxGuestSetBalloonSizeKernel: illegal balloon size %u (max=%u)\n",
383 cBalloonChunks, pDevExt->MemBalloon.cMaxChunks));
384 return VERR_INVALID_PARAMETER;
385 }
386
387 if (cBalloonChunks == pDevExt->MemBalloon.cMaxChunks)
388 return VINF_SUCCESS; /* nothing to do */
389
390 if ( cBalloonChunks > pDevExt->MemBalloon.cChunks
391 && !pDevExt->MemBalloon.paMemObj)
392 {
393 pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAllocZ(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
394 if (!pDevExt->MemBalloon.paMemObj)
395 {
396 LogRel(("VBoxGuestSetBalloonSizeKernel: no memory for paMemObj!\n"));
397 return VERR_NO_MEMORY;
398 }
399 }
400
401 rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
402 if (RT_FAILURE(rc))
403 return rc;
404
405 if (cBalloonChunks > pDevExt->MemBalloon.cChunks)
406 {
407 /* inflate */
408 for (i = pDevExt->MemBalloon.cChunks; i < cBalloonChunks; i++)
409 {
410 rc = RTR0MemObjAllocPhysNC(&pDevExt->MemBalloon.paMemObj[i],
411 VMMDEV_MEMORY_BALLOON_CHUNK_SIZE, NIL_RTHCPHYS);
412 if (RT_FAILURE(rc))
413 {
414 if (rc == VERR_NOT_SUPPORTED)
415 {
416 /* not supported -- fall back to the R3-allocated memory */
417 pDevExt->MemBalloon.fUseKernelAPI = false;
418 Assert(pDevExt->MemBalloon.cChunks == 0);
419 Log(("VBoxGuestSetBalloonSizeKernel: PhysNC allocs not supported, falling back to R3 allocs.\n"));
420 }
421 /* else if (rc == VERR_NO_MEMORY): cannot allocate more memory => don't try further, just stop here */
422 /* else: XXX what else can fail? VERR_MEMOBJ_INIT_FAILED for instance. just stop. */
423 break;
424 }
425
426 rc = vboxGuestBalloonInflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
427 if (RT_FAILURE(rc))
428 {
429 Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
430 RTR0MemObjFree(pDevExt->MemBalloon.paMemObj[i], true);
431 pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
432 break;
433 }
434 pDevExt->MemBalloon.cChunks++;
435 }
436 }
437 else
438 {
439 /* deflate */
440 for (i = pDevExt->MemBalloon.cChunks; i-- > cBalloonChunks;)
441 {
442 rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
443 if (RT_FAILURE(rc))
444 {
445 Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
446 break;
447 }
448 pDevExt->MemBalloon.cChunks--;
449 }
450 }
451
452 VbglGRFree(&pReq->header);
453 }
454
455 if (!pDevExt->MemBalloon.fUseKernelAPI)
456 {
457 /* R3 to allocate memory, then do ioctl(VBOXGUEST_IOCTL_CHANGE_BALLOON)
458 * and R0 to lock it down and tell the host. */
459 rc = VERR_NO_PHYS_MEMORY;
460 }
461
462 return rc;
463}
464
465
/**
 * Inflate/deflate the balloon by one chunk using memory supplied by ring-3.
 *
 * The chunk memory lives at ring-3 address u64ChunkAddr; on inflation it is
 * locked down here and its pages handed to the host, on deflation the
 * matching memory object is released again.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The session (not referenced by this worker).
 * @param   u64ChunkAddr    The ring-3 address of the chunk to add to / remove
 *                          from the balloon.
 * @param   fInflate        Inflate if true, deflate if false.
 */
static int vboxGuestSetBalloonSizeFromUser(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           uint64_t u64ChunkAddr, bool fInflate)
{
    VMMDevChangeMemBalloon *pReq;
    int rc = VINF_SUCCESS;
    uint32_t i;
    PRTR0MEMOBJ pMemObj = NULL;

    if (fInflate)
    {
        /* Refuse to grow past the host-reported maximum (or when the maximum
           was never queried, i.e. cMaxChunks == 0). */
        if (   pDevExt->MemBalloon.cChunks > pDevExt->MemBalloon.cMaxChunks - 1
            || pDevExt->MemBalloon.cMaxChunks == 0 /* If called without first querying. */)
        {
            LogRel(("vboxGuestSetBalloonSize: cannot inflate balloon, already have %u chunks (max=%u)\n",
                    pDevExt->MemBalloon.cChunks, pDevExt->MemBalloon.cMaxChunks));
            return VERR_INVALID_PARAMETER;
        }

        /* Lazily allocate the chunk tracking array, all slots set to NIL. */
        if (!pDevExt->MemBalloon.paMemObj)
        {
            pDevExt->MemBalloon.paMemObj = (PRTR0MEMOBJ)RTMemAlloc(sizeof(RTR0MEMOBJ) * pDevExt->MemBalloon.cMaxChunks);
            if (!pDevExt->MemBalloon.paMemObj)
            {
                LogRel(("VBoxGuestSetBalloonSizeFromUser: no memory for paMemObj!\n"));
                return VERR_NO_MEMORY;
            }
            for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
        }
    }
    else
    {
        /* Deflating an empty balloon is a caller bug. */
        if (pDevExt->MemBalloon.cChunks == 0)
        {
            AssertMsgFailed(("vboxGuestSetBalloonSize: cannot decrease balloon, already at size 0\n"));
            return VERR_INVALID_PARAMETER;
        }
    }

    /*
     * Enumerate all memory objects and check if the object is already registered.
     * While scanning, also remember the first free slot for an inflation.
     */
    for (i = 0; i < pDevExt->MemBalloon.cMaxChunks; i++)
    {
        if (   fInflate
            && !pMemObj
            && pDevExt->MemBalloon.paMemObj[i] == NIL_RTR0MEMOBJ)
            pMemObj = &pDevExt->MemBalloon.paMemObj[i]; /* found free object pointer */
        if (RTR0MemObjAddressR3(pDevExt->MemBalloon.paMemObj[i]) == u64ChunkAddr)
        {
            if (fInflate)
                return VERR_ALREADY_EXISTS; /* don't provide the same memory twice */
            pMemObj = &pDevExt->MemBalloon.paMemObj[i];
            break;
        }
    }
    if (!pMemObj)
    {
        if (fInflate)
        {
            /* no free object pointer found -- should not happen */
            return VERR_NO_MEMORY;
        }

        /* cannot free this memory as it wasn't provided before */
        return VERR_NOT_FOUND;
    }

    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
    if (RT_FAILURE(rc))
        return rc;

    if (fInflate)
    {
        /* Lock the ring-3 chunk down for the whole process (NIL_RTR0PROCESS
           means the current process), then hand its pages to the host. */
        rc = RTR0MemObjLockUser(pMemObj, u64ChunkAddr, VMMDEV_MEMORY_BALLOON_CHUNK_SIZE,
                                RTMEM_PROT_READ | RTMEM_PROT_WRITE, NIL_RTR0PROCESS);
        if (RT_SUCCESS(rc))
        {
            rc = vboxGuestBalloonInflate(pMemObj, pReq);
            if (RT_SUCCESS(rc))
                pDevExt->MemBalloon.cChunks++;
            else
            {
                Log(("vboxGuestSetBalloonSize(inflate): failed, rc=%Rrc!\n", rc));
                RTR0MemObjFree(*pMemObj, true);
                *pMemObj = NIL_RTR0MEMOBJ;
            }
        }
    }
    else
    {
        /* vboxGuestBalloonDeflate frees the memory object and nils the slot. */
        rc = vboxGuestBalloonDeflate(pMemObj, pReq);
        if (RT_SUCCESS(rc))
            pDevExt->MemBalloon.cChunks--;
        else
            Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
    }

    VbglGRFree(&pReq->header);
    return rc;
}
577
578
/**
 * Cleanup the memory balloon of a session.
 *
 * Deflates all chunks (returning the memory to the guest), frees the chunk
 * tracking array, and clears the balloon owner.
 *
 * @param   pDevExt     The device extension.
 * @param   pSession    The session.  Can be NULL if no owner check is
 *                      required (e.g. during driver teardown).
 */
static void vboxGuestCloseMemBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /* Only the owning session (or a NULL caller) may tear the balloon down. */
    if (   pSession != (PVBOXGUESTSESSION)NULL
        && ASMAtomicReadPtr((void * volatile *)&pDevExt->MemBalloon.pOwner) != pSession)
        return;

    if (pDevExt->MemBalloon.paMemObj)
    {
        VMMDevChangeMemBalloon *pReq;
        int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, cbChangeMemBalloonReq, VMMDevReq_ChangeMemBalloon);
        if (RT_SUCCESS(rc))
        {
            uint32_t i;
            /* Deflate from the top down; stop (and leak the remainder) on the
               first failure. */
            for (i = pDevExt->MemBalloon.cChunks; i-- > 0;)
            {
                rc = vboxGuestBalloonDeflate(&pDevExt->MemBalloon.paMemObj[i], pReq);
                if (RT_FAILURE(rc))
                {
                    Log(("vboxGuestSetBalloonSize(deflate): failed, rc=%Rrc!\n", rc));
                    break;
                }
                pDevExt->MemBalloon.paMemObj[i] = NIL_RTR0MEMOBJ;
                pDevExt->MemBalloon.cChunks--;
            }
            VbglGRFree(&pReq->header);
        }
        RTMemFree(pDevExt->MemBalloon.paMemObj);
        pDevExt->MemBalloon.paMemObj = NULL;
    }

    ASMAtomicWritePtr((void * volatile *)&pDevExt->MemBalloon.pOwner, NULL);
}
617
618
619/**
620 * Init the variables for memory ballooning.
621 *
622 * @param pDevExt The device extension
623 */
624static void vboxGuestInitMemBalloon(PVBOXGUESTDEVEXT pDevExt)
625{
626 pDevExt->MemBalloon.cChunks = 0;
627 pDevExt->MemBalloon.cMaxChunks = 0;
628 pDevExt->MemBalloon.fUseKernelAPI = true;
629 pDevExt->MemBalloon.paMemObj = NULL;
630 ASMAtomicWritePtr((void * volatile *)&pDevExt->MemBalloon.pOwner, NULL);
631}
632
633
/**
 * Initializes the VBoxGuest device extension when the
 * device driver is loaded.
 *
 * The native code locates the VMMDev on the PCI bus and retrieves
 * the MMIO and I/O port ranges; this function will take care of
 * mapping the MMIO memory (if present). Upon successful return
 * the native code should set up the interrupt handler.
 *
 * @returns VBox status code.
 *
 * @param   pDevExt         The device extension. Allocated by the native code.
 * @param   IOPortBase      The base of the I/O port range.
 * @param   pvMMIOBase      The base of the MMIO memory mapping.
 *                          This is optional, pass NULL if not present.
 * @param   cbMMIO          The size of the MMIO memory mapping.
 *                          This is optional, pass 0 if not present.
 * @param   enmOSType       The guest OS type to report to the VMMDev.
 * @param   fFixedEvents    Events that will be enabled upon init and no client
 *                          will ever be allowed to mask.
 */
int VBoxGuestInitDevExt(PVBOXGUESTDEVEXT pDevExt, uint16_t IOPortBase,
                        void *pvMMIOBase, uint32_t cbMMIO, VBOXOSTYPE enmOSType, uint32_t fFixedEvents)
{
    int rc, rc2;

    /*
     * Adjust fFixedEvents.  HGCM events must always be delivered when the
     * HGCM support is compiled in.
     */
#ifdef VBOX_WITH_HGCM
    fFixedEvents |= VMMDEV_EVENT_HGCM;
#endif

    /*
     * Initialize the data.
     */
    pDevExt->IOPortBase = IOPortBase;
    pDevExt->pVMMDevMemory = NULL;
    pDevExt->fFixedEvents = fFixedEvents;
    pDevExt->hGuestMappings = NIL_RTR0MEMOBJ;
    pDevExt->pIrqAckEvents = NULL;
    pDevExt->PhysIrqAckEvents = NIL_RTCCPHYS;
    pDevExt->WaitList.pHead = NULL;
    pDevExt->WaitList.pTail = NULL;
#ifdef VBOX_WITH_HGCM
    pDevExt->HGCMWaitList.pHead = NULL;
    pDevExt->HGCMWaitList.pTail = NULL;
#endif
    pDevExt->FreeList.pHead = NULL;
    pDevExt->FreeList.pTail = NULL;
    pDevExt->f32PendingEvents = 0;
    pDevExt->u32ClipboardClientId = 0;
    pDevExt->u32MousePosChangedSeq = 0;

    /*
     * If there is an MMIO region validate the version and size.
     */
    if (pvMMIOBase)
    {
        VMMDevMemory *pVMMDev = (VMMDevMemory *)pvMMIOBase;
        Assert(cbMMIO);
        if (    pVMMDev->u32Version == VMMDEV_MEMORY_VERSION
            &&  pVMMDev->u32Size >= 32
            &&  pVMMDev->u32Size <= cbMMIO)
        {
            pDevExt->pVMMDevMemory = pVMMDev;
            Log(("VBoxGuestInitDevExt: VMMDevMemory: mapping=%p size=%#RX32 (%#RX32) version=%#RX32\n",
                 pVMMDev, pVMMDev->u32Size, cbMMIO, pVMMDev->u32Version));
        }
        else /* try live without it. */
            LogRel(("VBoxGuestInitDevExt: Bogus VMMDev memory; u32Version=%RX32 (expected %RX32) u32Size=%RX32 (expected <= %RX32)\n",
                    pVMMDev->u32Version, VMMDEV_MEMORY_VERSION, pVMMDev->u32Size, cbMMIO));
    }

    /*
     * Create the wait and session spinlocks.
     */
    rc = RTSpinlockCreate(&pDevExt->EventSpinlock);
    if (RT_SUCCESS(rc))
        rc = RTSpinlockCreate(&pDevExt->SessionSpinlock);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestInitDevExt: failed to spinlock, rc=%d!\n", rc));
        if (pDevExt->EventSpinlock != NIL_RTSPINLOCK)
            RTSpinlockDestroy(pDevExt->EventSpinlock);
        return rc;
    }

    /*
     * Initialize the guest library and report the guest info back to VMMDev,
     * set the interrupt control filter mask, and fixate the guest mappings
     * made by the VMM.  Each step unwinds the previous ones on failure.
     */
    rc = VbglInit(pDevExt->IOPortBase, (VMMDevMemory *)pDevExt->pVMMDevMemory);
    if (RT_SUCCESS(rc))
    {
        rc = VbglGRAlloc((VMMDevRequestHeader **)&pDevExt->pIrqAckEvents, sizeof(VMMDevEvents), VMMDevReq_AcknowledgeEvents);
        if (RT_SUCCESS(rc))
        {
            /* The physical address of the ack request is needed by the ISR. */
            pDevExt->PhysIrqAckEvents = VbglPhysHeapGetPhysAddr(pDevExt->pIrqAckEvents);
            Assert(pDevExt->PhysIrqAckEvents != 0);

            rc = vboxGuestInitReportGuestInfo(pDevExt, enmOSType);
            if (RT_SUCCESS(rc))
            {
                rc = vboxGuestSetFilterMask(pDevExt, fFixedEvents);
                if (RT_SUCCESS(rc))
                {
                    /*
                     * Disable guest graphics capability by default. The guest specific
                     * graphics driver will re-enable this when it is necessary.
                     */
                    rc = VBoxGuestSetGuestCapabilities(0, VMMDEV_GUEST_SUPPORTS_GRAPHICS);
                    if (RT_SUCCESS(rc))
                    {
                        vboxGuestInitFixateGuestMappings(pDevExt);
                        vboxGuestInitMemBalloon(pDevExt);
                        Log(("VBoxGuestInitDevExt: returns success\n"));
                        return VINF_SUCCESS;
                    }
                    else
                        LogRel(("VBoxGuestInitDevExt: VBoxGuestSetGuestCapabilities failed, rc=%Rrc\n", rc));
                }
                else
                    LogRel(("VBoxGuestInitDevExt: vboxGuestSetFilterMask failed, rc=%Rrc\n", rc));
            }
            else
                LogRel(("VBoxGuestInitDevExt: vboxGuestInitReportGuestInfo failed, rc=%Rrc\n", rc));

            VbglGRFree((VMMDevRequestHeader *)pDevExt->pIrqAckEvents);
        }
        else
            LogRel(("VBoxGuestInitDevExt: VBoxGRAlloc failed, rc=%Rrc\n", rc));

        VbglTerminate();
    }
    else
        LogRel(("VBoxGuestInitDevExt: VbglInit failed, rc=%Rrc\n", rc));

    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);
    return rc; /* (failed) */
}
777
778
779/**
780 * Deletes all the items in a wait chain.
781 * @param pWait The head of the chain.
782 */
783static void VBoxGuestDeleteWaitList(PVBOXGUESTWAITLIST pList)
784{
785 while (pList->pHead)
786 {
787 int rc2;
788 PVBOXGUESTWAIT pWait = pList->pHead;
789 pList->pHead = pWait->pNext;
790
791 pWait->pNext = NULL;
792 pWait->pPrev = NULL;
793 rc2 = RTSemEventMultiDestroy(pWait->Event); AssertRC(rc2);
794 pWait->Event = NIL_RTSEMEVENTMULTI;
795 pWait->pSession = NULL;
796 RTMemFree(pWait);
797 }
798 pList->pHead = NULL;
799 pList->pTail = NULL;
800}
801
802
/**
 * Destroys the VBoxGuest device extension.
 *
 * The native code should call this when the driver is unloaded,
 * but don't call this on shutdown.
 *
 * @param   pDevExt     The device extension.
 */
void VBoxGuestDeleteDevExt(PVBOXGUESTDEVEXT pDevExt)
{
    int rc2;
    Log(("VBoxGuestDeleteDevExt:\n"));
    Log(("VBoxGuest: The additions driver is terminating.\n"));

    /*
     * Unfix the guest mappings, filter all events and clear
     * all capabilities.
     */
    vboxGuestTermUnfixGuestMappings(pDevExt);
    VBoxGuestSetGuestCapabilities(0, UINT32_MAX);
    vboxGuestSetFilterMask(pDevExt, 0);
    /* NULL session pointer skips the owner check and forces balloon cleanup. */
    vboxGuestCloseMemBalloon(pDevExt, (PVBOXGUESTSESSION)NULL);

    /*
     * Cleanup resources.
     */
    rc2 = RTSpinlockDestroy(pDevExt->EventSpinlock); AssertRC(rc2);
    rc2 = RTSpinlockDestroy(pDevExt->SessionSpinlock); AssertRC(rc2);

    VBoxGuestDeleteWaitList(&pDevExt->WaitList);
#ifdef VBOX_WITH_HGCM
    VBoxGuestDeleteWaitList(&pDevExt->HGCMWaitList);
#endif
    VBoxGuestDeleteWaitList(&pDevExt->FreeList);

    VbglTerminate();

    pDevExt->pVMMDevMemory = NULL;

    pDevExt->IOPortBase = 0;
    pDevExt->pIrqAckEvents = NULL;
}
845
846
847/**
848 * Creates a VBoxGuest user session.
849 *
850 * The native code calls this when a ring-3 client opens the device.
851 * Use VBoxGuestCreateKernelSession when a ring-0 client connects.
852 *
853 * @returns VBox status code.
854 * @param pDevExt The device extension.
855 * @param ppSession Where to store the session on success.
856 */
857int VBoxGuestCreateUserSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
858{
859 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
860 if (RT_UNLIKELY(!pSession))
861 {
862 LogRel(("VBoxGuestCreateUserSession: no memory!\n"));
863 return VERR_NO_MEMORY;
864 }
865
866 pSession->Process = RTProcSelf();
867 pSession->R0Process = RTR0ProcHandleSelf();
868 pSession->pDevExt = pDevExt;
869
870 *ppSession = pSession;
871 LogFlow(("VBoxGuestCreateUserSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
872 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Creates a VBoxGuest kernel session.
879 *
880 * The native code calls this when a ring-0 client connects to the device.
881 * Use VBoxGuestCreateUserSession when a ring-3 client opens the device.
882 *
883 * @returns VBox status code.
884 * @param pDevExt The device extension.
885 * @param ppSession Where to store the session on success.
886 */
887int VBoxGuestCreateKernelSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION *ppSession)
888{
889 PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)RTMemAllocZ(sizeof(*pSession));
890 if (RT_UNLIKELY(!pSession))
891 {
892 LogRel(("VBoxGuestCreateKernelSession: no memory!\n"));
893 return VERR_NO_MEMORY;
894 }
895
896 pSession->Process = NIL_RTPROCESS;
897 pSession->R0Process = NIL_RTR0PROCESS;
898 pSession->pDevExt = pDevExt;
899
900 *ppSession = pSession;
901 LogFlow(("VBoxGuestCreateKernelSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
902 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
903 return VINF_SUCCESS;
904}
905
906
907
908/**
909 * Closes a VBoxGuest session.
910 *
911 * @param pDevExt The device extension.
912 * @param pSession The session to close (and free).
913 */
914void VBoxGuestCloseSession(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
915{
916 unsigned i; NOREF(i);
917 Log(("VBoxGuestCloseSession: pSession=%p proc=%RTproc (%d) r0proc=%p\n",
918 pSession, pSession->Process, (int)pSession->Process, (uintptr_t)pSession->R0Process)); /** @todo %RTr0proc */
919
920#ifdef VBOX_WITH_HGCM
921 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
922 if (pSession->aHGCMClientIds[i])
923 {
924 VBoxGuestHGCMDisconnectInfo Info;
925 Info.result = 0;
926 Info.u32ClientID = pSession->aHGCMClientIds[i];
927 pSession->aHGCMClientIds[i] = 0;
928 Log(("VBoxGuestCloseSession: disconnecting client id %#RX32\n", Info.u32ClientID));
929 VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
930 }
931#endif
932
933 pSession->pDevExt = NULL;
934 pSession->Process = NIL_RTPROCESS;
935 pSession->R0Process = NIL_RTR0PROCESS;
936 vboxGuestCloseMemBalloon(pDevExt, pSession);
937 RTMemFree(pSession);
938}
939
940
941/**
942 * Links the wait-for-event entry into the tail of the given list.
943 *
944 * @param pList The list to link it into.
945 * @param pWait The wait for event entry to append.
946 */
947DECLINLINE(void) VBoxGuestWaitAppend(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
948{
949 const PVBOXGUESTWAIT pTail = pList->pTail;
950 pWait->pNext = NULL;
951 pWait->pPrev = pTail;
952 if (pTail)
953 pTail->pNext = pWait;
954 else
955 pList->pHead = pWait;
956 pList->pTail = pWait;
957}
958
959
960/**
961 * Unlinks the wait-for-event entry.
962 *
963 * @param pList The list to unlink it from.
964 * @param pWait The wait for event entry to unlink.
965 */
966DECLINLINE(void) VBoxGuestWaitUnlink(PVBOXGUESTWAITLIST pList, PVBOXGUESTWAIT pWait)
967{
968 const PVBOXGUESTWAIT pPrev = pWait->pPrev;
969 const PVBOXGUESTWAIT pNext = pWait->pNext;
970 if (pNext)
971 pNext->pPrev = pPrev;
972 else
973 pList->pTail = pPrev;
974 if (pPrev)
975 pPrev->pNext = pNext;
976 else
977 pList->pHead = pNext;
978}
979
980
981/**
982 * Allocates a wiat-for-event entry.
983 *
984 * @returns The wait-for-event entry.
985 * @param pDevExt The device extension.
986 * @param pSession The session that's allocating this. Can be NULL.
987 */
static PVBOXGUESTWAIT VBoxGuestWaitAlloc(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
{
    /*
     * Allocate it one way or the other.
     * Fast path: recycle an entry from the free list.  The unlocked peek at
     * pTail only decides whether taking the spinlock is worthwhile; the
     * authoritative check is repeated under the lock.
     */
    PVBOXGUESTWAIT pWait = pDevExt->FreeList.pTail;
    if (pWait)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = pDevExt->FreeList.pTail;
        if (pWait)
            VBoxGuestWaitUnlink(&pDevExt->FreeList, pWait);

        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    }
    if (!pWait)
    {
        /* Slow path: allocate a fresh entry and create its event semaphore.
           Failures are logged, rate limited to the first 32 occurrences,
           and reported to the caller as NULL. */
        static unsigned s_cErrors = 0;
        int rc;

        pWait = (PVBOXGUESTWAIT)RTMemAlloc(sizeof(*pWait));
        if (!pWait)
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestWaitAlloc: out-of-memory!\n"));
            return NULL;
        }

        rc = RTSemEventMultiCreate(&pWait->Event);
        if (RT_FAILURE(rc))
        {
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: RTSemEventMultiCreate failed with rc=%Rrc!\n", rc));
            RTMemFree(pWait);
            return NULL;
        }
    }

    /*
     * Zero members just as a precaution (recycled entries may carry stale state).
     */
    pWait->pNext = NULL;
    pWait->pPrev = NULL;
    pWait->fReqEvents = 0;
    pWait->fResEvents = 0;
    pWait->pSession = pSession;
#ifdef VBOX_WITH_HGCM
    pWait->pHGCMReq = NULL;
#endif
    RTSemEventMultiReset(pWait->Event);
    return pWait;
}
1042
1043
1044/**
1045 * Frees the wait-for-event entry.
1046 * The caller must own the wait spinlock!
1047 *
1048 * @param pDevExt The device extension.
1049 * @param pWait The wait-for-event entry to free.
1050 */
1051static void VBoxGuestWaitFreeLocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1052{
1053 pWait->fReqEvents = 0;
1054 pWait->fResEvents = 0;
1055#ifdef VBOX_WITH_HGCM
1056 pWait->pHGCMReq = NULL;
1057#endif
1058 VBoxGuestWaitAppend(&pDevExt->FreeList, pWait);
1059}
1060
1061
1062/**
1063 * Frees the wait-for-event entry.
1064 *
1065 * @param pDevExt The device extension.
1066 * @param pWait The wait-for-event entry to free.
1067 */
1068static void VBoxGuestWaitFreeUnlocked(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTWAIT pWait)
1069{
1070 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1071 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1072 VBoxGuestWaitFreeLocked(pDevExt, pWait);
1073 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1074}
1075
1076
1077/**
1078 * Modifies the guest capabilities.
1079 *
1080 * Should be called during driver init and termination.
1081 *
1082 * @returns VBox status code.
1083 * @param fOr The Or mask (what to enable).
1084 * @param fNot The Not mask (what to disable).
1085 */
1086int VBoxGuestSetGuestCapabilities(uint32_t fOr, uint32_t fNot)
1087{
1088 VMMDevReqGuestCapabilities2 *pReq;
1089 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_SetGuestCapabilities);
1090 if (RT_FAILURE(rc))
1091 {
1092 Log(("VBoxGuestSetGuestCapabilities: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1093 sizeof(*pReq), sizeof(*pReq), rc));
1094 return rc;
1095 }
1096
1097 pReq->u32OrMask = fOr;
1098 pReq->u32NotMask = fNot;
1099
1100 rc = VbglGRPerform(&pReq->header);
1101 if (RT_FAILURE(rc))
1102 Log(("VBoxGuestSetGuestCapabilities:VbglGRPerform failed, rc=%Rrc!\n", rc));
1103 else if (RT_FAILURE(pReq->header.rc))
1104 {
1105 Log(("VBoxGuestSetGuestCapabilities: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1106 rc = pReq->header.rc;
1107 }
1108
1109 VbglGRFree(&pReq->header);
1110 return rc;
1111}
1112
1113
1114/**
1115 * Implements the fast (no input or output) type of IOCtls.
1116 *
1117 * This is currently just a placeholder stub inherited from the support driver code.
1118 *
1119 * @returns VBox status code.
1120 * @param iFunction The IOCtl function number.
1121 * @param pDevExt The device extension.
1122 * @param pSession The session.
1123 */
1124int VBoxGuestCommonIOCtlFast(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1125{
1126 Log(("VBoxGuestCommonIOCtlFast: iFunction=%#x pDevExt=%p pSession=%p\n", iFunction, pDevExt, pSession));
1127
1128 NOREF(iFunction);
1129 NOREF(pDevExt);
1130 NOREF(pSession);
1131 return VERR_NOT_SUPPORTED;
1132}
1133
1134
1135/**
1136 * Return the VMM device port.
1137 *
 * @returns IPRT status code.
1139 * @param pDevExt The device extension.
1140 * @param pInfo The request info.
1141 * @param pcbDataReturned (out) contains the number of bytes to return.
1142 */
1143static int VBoxGuestCommonIOCtl_GetVMMDevPort(PVBOXGUESTDEVEXT pDevExt, VBoxGuestPortInfo *pInfo, size_t *pcbDataReturned)
1144{
1145 Log(("VBoxGuestCommonIOCtl: GETVMMDEVPORT\n"));
1146 pInfo->portAddress = pDevExt->IOPortBase;
1147 pInfo->pVMMDevMemory = (VMMDevMemory *)pDevExt->pVMMDevMemory;
1148 if (pcbDataReturned)
1149 *pcbDataReturned = sizeof(*pInfo);
1150 return VINF_SUCCESS;
1151}
1152
1153
1154/**
1155 * Worker VBoxGuestCommonIOCtl_WaitEvent.
1156 * The caller enters the spinlock, we may or may not leave it.
1157 *
1158 * @returns VINF_SUCCESS if we've left the spinlock and can return immediately.
1159 */
1160DECLINLINE(int) WaitEventCheckCondition(PVBOXGUESTDEVEXT pDevExt, VBoxGuestWaitEventInfo *pInfo,
1161 int iEvent, const uint32_t fReqEvents, PRTSPINLOCKTMP pTmp)
1162{
1163 uint32_t fMatches = pDevExt->f32PendingEvents & fReqEvents;
1164 if (fMatches)
1165 {
1166 ASMAtomicAndU32(&pDevExt->f32PendingEvents, ~fMatches);
1167 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, pTmp);
1168
1169 pInfo->u32EventFlagsOut = fMatches;
1170 pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
1171 if (fReqEvents & ~((uint32_t)1 << iEvent))
1172 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
1173 else
1174 Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
1175 return VINF_SUCCESS;
1176 }
1177 return VERR_TIMEOUT;
1178}
1179
1180
/**
 * Handles VBOXGUEST_IOCTL_WAITEVENT.
 *
 * Waits for any of the events requested in pInfo->u32EventMaskIn to become
 * pending, or until the timeout pInfo->u32TimeoutIn expires (milliseconds;
 * UINT32_MAX means wait forever, 0 means poll only).
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The wait request (in) and result (out).
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 */
static int VBoxGuestCommonIOCtl_WaitEvent(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                          VBoxGuestWaitEventInfo *pInfo, size_t *pcbDataReturned, bool fInterruptible)
{
    /* Initialize the output to "error, no events" up front. */
    pInfo->u32EventFlagsOut = 0;
    pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
    if (pcbDataReturned)
        *pcbDataReturned = sizeof(*pInfo);

    /*
     * Copy and verify the input mask.
     */
    const uint32_t fReqEvents = pInfo->u32EventMaskIn;
    int iEvent = ASMBitFirstSetU32(fReqEvents) - 1; /* index of lowest requested bit; -1 if mask is empty */
    if (RT_UNLIKELY(iEvent < 0))
    {
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: Invalid input mask %#x!!\n", fReqEvents));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Check the condition up front, before doing the wait-for-event allocations.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    int rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
        return rc;    /* WaitEventCheckCondition already released the spinlock. */
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /* A zero timeout means poll only: nothing was pending, so report timeout. */
    if (!pInfo->u32TimeoutIn)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
        return VERR_TIMEOUT;
    }

    PVBOXGUESTWAIT pWait = VBoxGuestWaitAlloc(pDevExt, pSession);
    if (!pWait)
        return VERR_NO_MEMORY;
    pWait->fReqEvents = fReqEvents;

    /*
     * We've got the wait entry now, re-enter the spinlock and check for the condition.
     * If the wait condition is met, return.
     * Otherwise enter into the list and go to sleep waiting for the ISR to signal us.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    rc = WaitEventCheckCondition(pDevExt, pInfo, iEvent, fReqEvents, &Tmp);
    if (rc == VINF_SUCCESS)
    {
        VBoxGuestWaitFreeUnlocked(pDevExt, pWait);
        return rc;
    }
    VBoxGuestWaitAppend(&pDevExt->WaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event,
                                         pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);
    else
        rc = RTSemEventMultiWait(pWait->Event,
                                 pInfo->u32TimeoutIn == UINT32_MAX ? RT_INDEFINITE_WAIT : pInfo->u32TimeoutIn);

    /*
     * There is one special case here and that's when the semaphore is
     * destroyed upon device driver unload. This shouldn't happen of course,
     * but in case it does, just get out of here ASAP.
     */
    if (rc == VERR_SEM_DESTROYED)
        return rc;

    /*
     * Unlink the wait item and dispose of it.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->WaitList, pWait);
    const uint32_t fResEvents = pWait->fResEvents;
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    /*
     * Now deal with the return code.
     * fResEvents == UINT32_MAX marks a cancelled wait (see
     * VBoxGuestCommonIOCtl_CancelAllWaitEvents).
     */
    if (    fResEvents
        &&  fResEvents != UINT32_MAX)
    {
        pInfo->u32EventFlagsOut = fResEvents;
        pInfo->u32Result = VBOXGUEST_WAITEVENT_OK;
        if (fReqEvents & ~((uint32_t)1 << iEvent))
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x\n", pInfo->u32EventFlagsOut));
        else
            Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %#x/%d\n", pInfo->u32EventFlagsOut, iEvent));
        rc = VINF_SUCCESS;
    }
    else if (   fResEvents == UINT32_MAX
             || rc == VERR_INTERRUPTED)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_INTERRUPTED;
        rc = VERR_INTERRUPTED;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_INTERRUPTED\n"));
    }
    else if (rc == VERR_TIMEOUT)
    {
        pInfo->u32Result = VBOXGUEST_WAITEVENT_TIMEOUT;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns VERR_TIMEOUT\n"));
    }
    else
    {
        if (RT_SUCCESS(rc))
        {
            /* Woken up successfully but without any events - unexpected;
               log (rate limited) and turn it into an internal error. */
            static unsigned s_cErrors = 0;
            if (s_cErrors++ < 32)
                LogRel(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc but no events!\n", rc));
            rc = VERR_INTERNAL_ERROR;
        }
        pInfo->u32Result = VBOXGUEST_WAITEVENT_ERROR;
        Log(("VBoxGuestCommonIOCtl: WAITEVENT: returns %Rrc\n", rc));
    }

    return rc;
}
1302
1303
1304static int VBoxGuestCommonIOCtl_CancelAllWaitEvents(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession)
1305{
1306 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1307#if defined(RT_OS_SOLARIS)
1308 RTTHREADPREEMPTSTATE State = RTTHREADPREEMPTSTATE_INITIALIZER;
1309#endif
1310 PVBOXGUESTWAIT pWait;
1311 int rc = 0;
1312
1313 Log(("VBoxGuestCommonIOCtl: CANCEL_ALL_WAITEVENTS\n"));
1314
1315 /*
1316 * Walk the event list and wake up anyone with a matching session.
1317 *
1318 * Note! On Solaris we have to do really ugly stuff here because
1319 * RTSemEventMultiSignal cannot be called with interrupts disabled.
1320 * The hack is racy, but what we can we do... (Eliminate this
1321 * termination hack, perhaps?)
1322 */
1323#if defined(RT_OS_SOLARIS)
1324 RTThreadPreemptDisable(&State);
1325 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1326 do
1327 {
1328 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1329 if ( pWait->pSession == pSession
1330 && pWait->fResEvents != UINT32_MAX)
1331 {
1332 RTSEMEVENTMULTI hEvent = pWait->Event;
1333 pWait->fResEvents = UINT32_MAX;
1334 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1335 /* HACK ALRET! This races wakeup + reuse! */
1336 rc |= RTSemEventMultiSignal(hEvent);
1337 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1338 break;
1339 }
1340 } while (pWait);
1341 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1342 RTThreadPreemptDisable(&State);
1343#else
1344 RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
1345 for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
1346 if (pWait->pSession == pSession)
1347 {
1348 pWait->fResEvents = UINT32_MAX;
1349 rc |= RTSemEventMultiSignal(pWait->Event);
1350 }
1351 RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
1352#endif
1353 Assert(rc == 0);
1354
1355 return VINF_SUCCESS;
1356}
1357
1358
/**
 * Handles VBOXGUEST_IOCTL_VMMREQUEST.
 *
 * Validates the VMMDev request embedded in the ioctl buffer, copies it into
 * physically contiguous heap memory, performs it, and copies the result back
 * into the caller's buffer.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pReqHdr         The VMMDev request header (input and output).
 * @param   cbData          The size of the ioctl buffer.
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_VMMRequest(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                           VMMDevRequestHeader *pReqHdr, size_t cbData, size_t *pcbDataReturned)
{
    Log(("VBoxGuestCommonIOCtl: VMMREQUEST type %d\n", pReqHdr->requestType));

    /*
     * Validate the header and request size.
     * The request must be at least as large as the minimum for its type,
     * and must fit inside the caller-supplied buffer.
     */
    const VMMDevRequestType enmType   = pReqHdr->requestType;
    const uint32_t          cbReq     = pReqHdr->size;
    const uint32_t          cbMinSize = vmmdevGetRequestSize(enmType);
    if (cbReq < cbMinSize)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid hdr size %#x, expected >= %#x; type=%#x!!\n",
             cbReq, cbMinSize, enmType));
        return VERR_INVALID_PARAMETER;
    }
    if (cbReq > cbData)
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid size %#x, expected >= %#x (hdr); type=%#x!!\n",
             cbData, cbReq, enmType));
        return VERR_INVALID_PARAMETER;
    }
    int rc = VbglGRVerify(pReqHdr, cbData);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: invalid header: size %#x, expected >= %#x (hdr); type=%#x; rc %d!!\n",
             cbData, cbReq, enmType, rc));
        return rc;
    }

    /*
     * Make a copy of the request in the physical memory heap so
     * the VBoxGuestLibrary can more easily deal with the request.
     * (This is really a waste of time since the OS or the OS specific
     * code has already buffered or locked the input/output buffer, but
     * it does makes things a bit simpler wrt to phys address.)
     */
    VMMDevRequestHeader *pReqCopy;
    rc = VbglGRAlloc(&pReqCopy, cbReq, enmType);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
             cbReq, cbReq, rc));
        return rc;
    }
    memcpy(pReqCopy, pReqHdr, cbReq);

    if (enmType == VMMDevReq_GetMouseStatus) /* clear poll condition. */
        pSession->u32MousePosChangedSeq = ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq);

    rc = VbglGRPerform(pReqCopy);
    if (    RT_SUCCESS(rc)
        &&  RT_SUCCESS(pReqCopy->rc))
    {
        Assert(rc != VINF_HGCM_ASYNC_EXECUTE);
        Assert(pReqCopy->rc != VINF_HGCM_ASYNC_EXECUTE);

        /* Copy the (possibly host-modified) request back to the caller. */
        memcpy(pReqHdr, pReqCopy, cbReq);
        if (pcbDataReturned)
            *pcbDataReturned = cbReq;
    }
    else if (RT_FAILURE(rc))
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: VbglGRPerform - rc=%Rrc!\n", rc));
    else
    {
        /* Transport OK, but the host rejected the request; propagate its status. */
        Log(("VBoxGuestCommonIOCtl: VMMREQUEST: request execution failed; VMMDev rc=%Rrc!\n", pReqCopy->rc));
        rc = pReqCopy->rc;
    }

    VbglGRFree(pReqCopy);
    return rc;
}
1432
1433
1434static int VBoxGuestCommonIOCtl_CtlFilterMask(PVBOXGUESTDEVEXT pDevExt, VBoxGuestFilterMaskInfo *pInfo)
1435{
1436 VMMDevCtlGuestFilterMask *pReq;
1437 int rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(*pReq), VMMDevReq_CtlGuestFilterMask);
1438 if (RT_FAILURE(rc))
1439 {
1440 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: failed to allocate %u (%#x) bytes to cache the request. rc=%d!!\n",
1441 sizeof(*pReq), sizeof(*pReq), rc));
1442 return rc;
1443 }
1444
1445 pReq->u32OrMask = pInfo->u32OrMask;
1446 pReq->u32NotMask = pInfo->u32NotMask;
1447 pReq->u32NotMask &= ~pDevExt->fFixedEvents; /* don't permit these to be cleared! */
1448 rc = VbglGRPerform(&pReq->header);
1449 if (RT_FAILURE(rc))
1450 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: VbglGRPerform failed, rc=%Rrc!\n", rc));
1451 else if (RT_FAILURE(pReq->header.rc))
1452 {
1453 Log(("VBoxGuestCommonIOCtl: CTL_FILTER_MASK: The request failed; VMMDev rc=%Rrc!\n", pReq->header.rc));
1454 rc = pReq->header.rc;
1455 }
1456
1457 VbglGRFree(&pReq->header);
1458 return rc;
1459}
1460
1461#ifdef VBOX_WITH_HGCM
1462
1463AssertCompile(RT_INDEFINITE_WAIT == (uint32_t)RT_INDEFINITE_WAIT); /* assumed by code below */
1464
/**
 * Worker for VBoxGuestHGCMAsyncWaitCallback and
 * VBoxGuestHGCMAsyncWaitCallbackInterruptible.
 *
 * Blocks until the host flags the HGCM request as done (VBOX_HGCM_REQ_DONE),
 * the timeout expires, or - when permitted - the wait is interrupted.
 *
 * @returns VBox status code.
 * @param   pHdr            The HGCM request header to wait on.
 * @param   pDevExt         The device extension.
 * @param   fInterruptible  Whether the wait may be interrupted by signals.
 * @param   cMillies        The timeout in milliseconds.
 */
static int VBoxGuestHGCMAsyncWaitCallbackWorker(VMMDevHGCMRequestHeader volatile *pHdr, PVBOXGUESTDEVEXT pDevExt,
                                                bool fInterruptible, uint32_t cMillies)
{

    /*
     * Check to see if the condition was met by the time we got here.
     *
     * We create a simple poll loop here for dealing with out-of-memory
     * conditions since the caller isn't necessarily able to deal with
     * us returning too early.
     */
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    PVBOXGUESTWAIT pWait;
    for (;;)
    {
        RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
        if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
        {
            RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
            return VINF_SUCCESS;
        }
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

        pWait = VBoxGuestWaitAlloc(pDevExt, NULL);
        if (pWait)
            break;
        if (fInterruptible)
            return VERR_INTERRUPTED;
        RTThreadSleep(1); /* back off briefly before retrying the allocation */
    }
    pWait->fReqEvents = VMMDEV_EVENT_HGCM;
    pWait->pHGCMReq = pHdr;

    /*
     * Re-enter the spinlock and re-check for the condition.
     * If the condition is met, return.
     * Otherwise link us into the HGCM wait list and go to sleep.
     */
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    if ((pHdr->fu32Flags & VBOX_HGCM_REQ_DONE) != 0)
    {
        VBoxGuestWaitFreeLocked(pDevExt, pWait);
        RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
        return VINF_SUCCESS;
    }
    VBoxGuestWaitAppend(&pDevExt->HGCMWaitList, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);

    int rc;
    if (fInterruptible)
        rc = RTSemEventMultiWaitNoResume(pWait->Event, cMillies);
    else
        rc = RTSemEventMultiWait(pWait->Event, cMillies);
    if (rc == VERR_SEM_DESTROYED)
        return rc;    /* driver unload in progress - bail out without touching anything */

    /*
     * Unlink, free and return.
     * Timeouts and (permitted) interruptions are expected; anything else gets logged.
     */
    if (    RT_FAILURE(rc)
        &&  rc != VERR_TIMEOUT
        &&  (    !fInterruptible
             ||  rc != VERR_INTERRUPTED))
        LogRel(("VBoxGuestHGCMAsyncWaitCallback: wait failed! %Rrc\n", rc));

    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
    VBoxGuestWaitUnlink(&pDevExt->HGCMWaitList, pWait);
    VBoxGuestWaitFreeLocked(pDevExt, pWait);
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
    return rc;
}
1537
1538
1539/**
1540 * This is a callback for dealing with async waits.
1541 *
1542 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1543 */
1544static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallback(VMMDevHGCMRequestHeader *pHdr, void *pvUser, uint32_t u32User)
1545{
1546 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1547 Log(("VBoxGuestHGCMAsyncWaitCallback: requestType=%d\n", pHdr->header.requestType));
1548 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1549 pDevExt,
1550 false /* fInterruptible */,
1551 u32User /* cMillies */);
1552}
1553
1554
1555/**
 * This is a callback for dealing with async waits that can be interrupted.
1557 *
1558 * It operates in a manner similar to VBoxGuestCommonIOCtl_WaitEvent.
1559 */
1560static DECLCALLBACK(int) VBoxGuestHGCMAsyncWaitCallbackInterruptible(VMMDevHGCMRequestHeader *pHdr,
1561 void *pvUser, uint32_t u32User)
1562{
1563 PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pvUser;
1564 Log(("VBoxGuestHGCMAsyncWaitCallbackInterruptible: requestType=%d\n", pHdr->header.requestType));
1565 return VBoxGuestHGCMAsyncWaitCallbackWorker((VMMDevHGCMRequestHeader volatile *)pHdr,
1566 pDevExt,
1567 true /* fInterruptible */,
1568 u32User /* cMillies */ );
1569
1570}
1571
1572
/**
 * Handles VBOXGUEST_IOCTL_HGCM_CONNECT.
 *
 * Connects to an HGCM service and records the resulting client id in the
 * session's client id table so it can be cleaned up when the session closes.
 *
 * @returns VBox status code.
 * @param   pDevExt         The device extension.
 * @param   pSession        The calling session.
 * @param   pInfo           The connect request (in/out).
 * @param   pcbDataReturned Where to store the size of the returned data. Optional.
 */
static int VBoxGuestCommonIOCtl_HGCMConnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMConnectInfo *pInfo,
                                            size_t *pcbDataReturned)
{
    /*
     * The VbglHGCMConnect call will invoke the callback if the HGCM
     * call is performed in an ASYNC fashion. The function is not able
     * to deal with cancelled requests.
     */
    Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: %.128s\n",
         pInfo->Loc.type == VMMDevHGCMLoc_LocalHost || pInfo->Loc.type == VMMDevHGCMLoc_LocalHost_Existing
         ? pInfo->Loc.u.host.achName : "<not local host>"));

    int rc = VbglR0HGCMInternalConnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
    if (RT_SUCCESS(rc))
    {
        Log(("VBoxGuestCommonIOCtl: HGCM_CONNECT: u32Client=%RX32 result=%Rrc (rc=%Rrc)\n",
             pInfo->u32ClientID, pInfo->result, rc));
        if (RT_SUCCESS(pInfo->result))
        {
            /*
             * Append the client id to the client id table.
             * If the table has somehow become filled up, we'll disconnect the session.
             */
            unsigned i;
            RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
            RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
            for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
                if (!pSession->aHGCMClientIds[i])
                {
                    pSession->aHGCMClientIds[i] = pInfo->u32ClientID;
                    break;
                }
            RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
            if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
            {
                /* No free slot: roll the connect back and fail the ioctl. */
                static unsigned s_cErrors = 0;
                if (s_cErrors++ < 32)
                    LogRel(("VBoxGuestCommonIOCtl: HGCM_CONNECT: too many HGCMConnect calls for one session!\n"));

                VBoxGuestHGCMDisconnectInfo Info;
                Info.result = 0;
                Info.u32ClientID = pInfo->u32ClientID;
                VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
                return VERR_TOO_MANY_OPEN_FILES;
            }
        }
        if (pcbDataReturned)
            *pcbDataReturned = sizeof(*pInfo);
    }
    return rc;
}
1624
1625
1626static int VBoxGuestCommonIOCtl_HGCMDisconnect(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession, VBoxGuestHGCMDisconnectInfo *pInfo,
1627 size_t *pcbDataReturned)
1628{
1629 /*
1630 * Validate the client id and invalidate its entry while we're in the call.
1631 */
1632 const uint32_t u32ClientId = pInfo->u32ClientID;
1633 unsigned i;
1634 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1635 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1636 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1637 if (pSession->aHGCMClientIds[i] == u32ClientId)
1638 {
1639 pSession->aHGCMClientIds[i] = UINT32_MAX;
1640 break;
1641 }
1642 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1643 if (i >= RT_ELEMENTS(pSession->aHGCMClientIds))
1644 {
1645 static unsigned s_cErrors = 0;
1646 if (s_cErrors++ > 32)
1647 LogRel(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", u32ClientId));
1648 return VERR_INVALID_HANDLE;
1649 }
1650
1651 /*
1652 * The VbglHGCMConnect call will invoke the callback if the HGCM
1653 * call is performed in an ASYNC fashion. The function is not able
1654 * to deal with cancelled requests.
1655 */
1656 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: u32Client=%RX32\n", pInfo->u32ClientID));
1657 int rc = VbglR0HGCMInternalDisconnect(pInfo, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1658 if (RT_SUCCESS(rc))
1659 {
1660 Log(("VBoxGuestCommonIOCtl: HGCM_DISCONNECT: result=%Rrc\n", pInfo->result));
1661 if (pcbDataReturned)
1662 *pcbDataReturned = sizeof(*pInfo);
1663 }
1664
1665 /* Update the client id array according to the result. */
1666 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1667 if (pSession->aHGCMClientIds[i] == UINT32_MAX)
1668 pSession->aHGCMClientIds[i] = RT_SUCCESS(rc) && RT_SUCCESS(pInfo->result) ? 0 : u32ClientId;
1669 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1670
1671 return rc;
1672}
1673
1674
1675static int VBoxGuestCommonIOCtl_HGCMCall(PVBOXGUESTDEVEXT pDevExt,
1676 PVBOXGUESTSESSION pSession,
1677 VBoxGuestHGCMCallInfo *pInfo,
1678 uint32_t cMillies, bool fInterruptible, bool f32bit,
1679 size_t cbExtra, size_t cbData, size_t *pcbDataReturned)
1680{
1681 /*
1682 * Some more validations.
1683 */
1684 if (pInfo->cParms > 4096) /* (Just make sure it doesn't overflow the next check.) */
1685 {
1686 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cParm=%RX32 is not sane\n", pInfo->cParms));
1687 return VERR_INVALID_PARAMETER;
1688 }
1689 size_t cbActual = cbExtra + sizeof(*pInfo);
1690#ifdef RT_ARCH_AMD64
1691 if (f32bit)
1692 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter32);
1693 else
1694#endif
1695 cbActual += pInfo->cParms * sizeof(HGCMFunctionParameter);
1696 if (cbData < cbActual)
1697 {
1698 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: cbData=%#zx (%zu) required size is %#zx (%zu)\n",
1699 cbData, cbActual));
1700 return VERR_INVALID_PARAMETER;
1701 }
1702
1703 /*
1704 * Validate the client id.
1705 */
1706 const uint32_t u32ClientId = pInfo->u32ClientID;
1707 unsigned i;
1708 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1709 RTSpinlockAcquireNoInts(pDevExt->SessionSpinlock, &Tmp);
1710 for (i = 0; i < RT_ELEMENTS(pSession->aHGCMClientIds); i++)
1711 if (pSession->aHGCMClientIds[i] == u32ClientId)
1712 break;
1713 RTSpinlockReleaseNoInts(pDevExt->SessionSpinlock, &Tmp);
1714 if (RT_UNLIKELY(i >= RT_ELEMENTS(pSession->aHGCMClientIds)))
1715 {
1716 static unsigned s_cErrors = 0;
1717 if (s_cErrors++ > 32)
1718 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: Invalid handle. u32Client=%RX32\n", u32ClientId));
1719 return VERR_INVALID_HANDLE;
1720 }
1721
1722 /*
1723 * The VbglHGCMCall call will invoke the callback if the HGCM
1724 * call is performed in an ASYNC fashion. This function can
1725 * deal with cancelled requests, so we let user more requests
1726 * be interruptible (should add a flag for this later I guess).
1727 */
1728 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: u32Client=%RX32\n", pInfo->u32ClientID));
1729 int rc;
1730 uint32_t fFlags = pSession->R0Process == NIL_RTR0PROCESS ? VBGLR0_HGCMCALL_F_KERNEL : VBGLR0_HGCMCALL_F_USER;
1731#ifdef RT_ARCH_AMD64
1732 if (f32bit)
1733 {
1734 if (fInterruptible)
1735 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1736 else
1737 rc = VbglR0HGCMInternalCall32(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1738 }
1739 else
1740#endif
1741 {
1742 if (fInterruptible)
1743 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallbackInterruptible, pDevExt, cMillies);
1744 else
1745 rc = VbglR0HGCMInternalCall(pInfo, cbData - cbExtra, fFlags, VBoxGuestHGCMAsyncWaitCallback, pDevExt, cMillies);
1746 }
1747 if (RT_SUCCESS(rc))
1748 {
1749 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: result=%Rrc\n", pInfo->result));
1750 if (pcbDataReturned)
1751 *pcbDataReturned = cbActual;
1752 }
1753 else
1754 {
1755 if (rc != VERR_INTERRUPTED)
1756 LogRel(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1757 else
1758 Log(("VBoxGuestCommonIOCtl: HGCM_CALL: %s Failed. rc=%Rrc.\n", f32bit ? "32" : "64", rc));
1759 }
1760 return rc;
1761}
1762
1763
1764/**
1765 * @returns VBox status code. Unlike the other HGCM IOCtls this will combine
1766 * the VbglHGCMConnect/Disconnect return code with the Info.result.
1767 *
1768 * @param pDevExt The device extension.
1769 * @param pu32ClientId The client id.
1770 * @param pcbDataReturned Where to store the amount of returned data. Can
1771 * be NULL.
1772 */
1773static int VBoxGuestCommonIOCtl_HGCMClipboardReConnect(PVBOXGUESTDEVEXT pDevExt, uint32_t *pu32ClientId, size_t *pcbDataReturned)
1774{
1775 int rc;
1776 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: Current u32ClientId=%RX32\n", pDevExt->u32ClipboardClientId));
1777
1778
1779 /*
1780 * If there is an old client, try disconnect it first.
1781 */
1782 if (pDevExt->u32ClipboardClientId != 0)
1783 {
1784 VBoxGuestHGCMDisconnectInfo Info;
1785 Info.result = VERR_WRONG_ORDER;
1786 Info.u32ClientID = pDevExt->u32ClipboardClientId;
1787 rc = VbglR0HGCMInternalDisconnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1788 if (RT_SUCCESS(rc))
1789 {
1790 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. VbglHGCMDisconnect -> rc=%Rrc\n", rc));
1791 return rc;
1792 }
1793 if (RT_FAILURE((int32_t)Info.result))
1794 {
1795 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: failed to disconnect old client. Info.result=%Rrc\n", rc));
1796 return Info.result;
1797 }
1798 pDevExt->u32ClipboardClientId = 0;
1799 }
1800
1801 /*
1802 * Try connect.
1803 */
1804 VBoxGuestHGCMConnectInfo Info;
1805 Info.Loc.type = VMMDevHGCMLoc_LocalHost_Existing;
1806 strcpy(Info.Loc.u.host.achName, "VBoxSharedClipboard");
1807 Info.u32ClientID = 0;
1808 Info.result = VERR_WRONG_ORDER;
1809
1810 rc = VbglR0HGCMInternalConnect(&Info, VBoxGuestHGCMAsyncWaitCallback, pDevExt, RT_INDEFINITE_WAIT);
1811 if (RT_FAILURE(rc))
1812 {
1813 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1814 return rc;
1815 }
1816 if (RT_FAILURE(Info.result))
1817 {
1818 LogRel(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: VbglHGCMConnected -> rc=%Rrc\n", rc));
1819 return rc;
1820 }
1821
1822 Log(("VBoxGuestCommonIOCtl: CLIPBOARD_CONNECT: connected successfully u32ClientId=%RX32\n", Info.u32ClientID));
1823
1824 pDevExt->u32ClipboardClientId = Info.u32ClientID;
1825 *pu32ClientId = Info.u32ClientID;
1826 if (pcbDataReturned)
1827 *pcbDataReturned = sizeof(uint32_t);
1828
1829 return VINF_SUCCESS;
1830}
1831
1832#endif /* VBOX_WITH_HGCM */
1833
1834/**
1835 * Handle VBOXGUEST_IOCTL_CHECK_BALLOON from R3.
1836 *
1837 * Ask the host for the size of the balloon and try to set it accordingly. If
1838 * this fails, return with VERR_NO_PHYS_MEMORY and userland has to provide the
1839 * memory.
1840 *
1841 * @returns VBox status code.
1842 *
1843 * @param pDevExt The device extension.
1844 * @param pSession The session.
1845 * @param pInfo The output buffer.
1846 * @param pcbDataReturned Where to store the amount of returned data. Can
1847 * be NULL.
1848 */
static int VBoxGuestCommonIOCtl_QueryMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
                                                   VBoxGuestCheckBalloonInfo *pInfo, size_t *pcbDataReturned)
{
    VMMDevGetMemBalloonChangeRequest *pReq;
    PVBOXGUESTSESSION pOwner;
    int rc;

    Log(("VBoxGuestCommonIOCtl: QUERYMEMORYBALLOON\n"));
    /* the first user trying to query/change the balloon is the owner;
       all other sessions are refused until the owner releases it. */
    if ( !ASMAtomicCmpXchgExPtr((void * volatile *)&pDevExt->MemBalloon.pOwner, (const void*) pSession,
                                (PVBOXGUESTSESSION)NULL, (void**)&pOwner)
        && pOwner != pSession)
        return VERR_PERMISSION_DENIED;

    rc = VbglGRAlloc((VMMDevRequestHeader **)&pReq, sizeof(VMMDevGetMemBalloonChangeRequest), VMMDevReq_GetMemBalloonChangeRequest);
    if (RT_FAILURE(rc))
        return rc;

    /* This is a response to that event. Setting this bit means that we request the value
     * from the host and change the guest memory balloon according to this value. */
    pReq->eventAck = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
    rc = VbglGRPerform(&pReq->header);
    if (RT_FAILURE(rc))
    {
        LogRel(("VBoxGuestCommonIOCtl: QUERYMEMORYBALLOON: VbglGRPerform failed. rc=%d\n", rc));
        VbglGRFree(&pReq->header);
        return rc;
    }

    /* The maximum chunk count is reported by the host and should not change
       after the first query (hence the assertion). */
    Assert(pDevExt->MemBalloon.cMaxChunks == pReq->cPhysMemChunks || pDevExt->MemBalloon.cMaxChunks == 0);
    pDevExt->MemBalloon.cMaxChunks = pReq->cPhysMemChunks;

    rc = vboxGuestSetBalloonSizeKernel(pDevExt, pSession, pReq->cBalloonChunks);
    /* Ignore out of memory failures */
    if (rc == VERR_NO_MEMORY)
        rc = VINF_SUCCESS;

    /* Return values.  VERR_NO_PHYS_MEMORY tells R3 to take over the
       ballooning (fHandleInR3) rather than being treated as a failure. */
    pInfo->cBalloonChunks = pReq->cBalloonChunks;
    pInfo->fHandleInR3 = false;
    if (rc == VERR_NO_PHYS_MEMORY)
    {
        pInfo->fHandleInR3 = true;
        rc = VINF_SUCCESS;
    }

    VbglGRFree(&pReq->header);

    if (pcbDataReturned)
        *pcbDataReturned = sizeof(VBoxGuestCheckBalloonInfo);

    Log(("VBoxGuestCommonIOCtl: QUERYMEMORYBALLOON returns %d\n", rc));
    return rc;
}
1903
1904
1905/**
1906 * Handle a request for changing the memory balloon.
1907 *
1908 * @returns VBox status code.
1909 *
1910 * @param pDevExt The device extention.
1911 * @param pSession The session.
1912 * @param pInfo The change request structure (input).
1913 * @param pcbDataReturned Where to store the amount of returned data. Can
1914 * be NULL.
1915 */
1916static int VBoxGuestCommonIOCtl_ChangeMemoryBalloon(PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1917 VBoxGuestChangeBalloonInfo *pInfo, size_t *pcbDataReturned)
1918{
1919 int rc;
1920 PVBOXGUESTSESSION pOwner;
1921
1922 if (pDevExt->MemBalloon.fUseKernelAPI)
1923 return VERR_PERMISSION_DENIED;
1924
1925 /* the first user trying to query/change the balloon is the owner */
1926 if ( !ASMAtomicCmpXchgExPtr((void * volatile *)&pDevExt->MemBalloon.pOwner, (const void*) pSession,
1927 (PVBOXGUESTSESSION)NULL, (void**)&pOwner)
1928 && pOwner != pSession)
1929 return VERR_PERMISSION_DENIED;
1930
1931 rc = vboxGuestSetBalloonSizeFromUser(pDevExt, pSession, pInfo->u64ChunkAddr, pInfo->fInflate);
1932 if (pcbDataReturned)
1933 *pcbDataReturned = 0;
1934 return rc;
1935}
1936
1937
1938/**
1939 * Guest backdoor logging.
1940 *
1941 * @returns VBox status code.
1942 *
1943 * @param pch The log message (need not be NULL terminated).
1944 * @param cbData Size of the buffer.
1945 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1946 */
1947static int VBoxGuestCommonIOCtl_Log(const char *pch, size_t cbData, size_t *pcbDataReturned)
1948{
1949 NOREF(pch);
1950 NOREF(cbData);
1951 Log(("%.*s", cbData, pch));
1952 if (pcbDataReturned)
1953 *pcbDataReturned = 0;
1954 return VINF_SUCCESS;
1955}
1956
1957
1958/**
1959 * Common IOCtl for user to kernel and kernel to kernel communcation.
1960 *
1961 * This function only does the basic validation and then invokes
1962 * worker functions that takes care of each specific function.
1963 *
1964 * @returns VBox status code.
1965 *
1966 * @param iFunction The requested function.
1967 * @param pDevExt The device extension.
1968 * @param pSession The client session.
1969 * @param pvData The input/output data buffer. Can be NULL depending on the function.
1970 * @param cbData The max size of the data buffer.
1971 * @param pcbDataReturned Where to store the amount of returned data. Can be NULL.
1972 */
1973int VBoxGuestCommonIOCtl(unsigned iFunction, PVBOXGUESTDEVEXT pDevExt, PVBOXGUESTSESSION pSession,
1974 void *pvData, size_t cbData, size_t *pcbDataReturned)
1975{
1976 Log(("VBoxGuestCommonIOCtl: iFunction=%#x pDevExt=%p pSession=%p pvData=%p cbData=%zu\n",
1977 iFunction, pDevExt, pSession, pvData, cbData));
1978
1979 /*
1980 * Make sure the returned data size is set to zero.
1981 */
1982 if (pcbDataReturned)
1983 *pcbDataReturned = 0;
1984
1985 /*
1986 * Define some helper macros to simplify validation.
1987 */
1988#define CHECKRET_RING0(mnemonic) \
1989 do { \
1990 if (pSession->R0Process != NIL_RTR0PROCESS) \
1991 { \
1992 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Ring-0 only, caller is %RTproc/%p\n", \
1993 pSession->Process, (uintptr_t)pSession->R0Process)); \
1994 return VERR_PERMISSION_DENIED; \
1995 } \
1996 } while (0)
1997#define CHECKRET_MIN_SIZE(mnemonic, cbMin) \
1998 do { \
1999 if (cbData < (cbMin)) \
2000 { \
2001 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": cbData=%#zx (%zu) min is %#zx (%zu)\n", \
2002 cbData, cbData, (size_t)(cbMin), (size_t)(cbMin))); \
2003 return VERR_BUFFER_OVERFLOW; \
2004 } \
2005 if ((cbMin) != 0 && !VALID_PTR(pvData)) \
2006 { \
2007 LogRel(("VBoxGuestCommonIOCtl: " mnemonic ": Invalid pointer %p\n", pvData)); \
2008 return VERR_INVALID_POINTER; \
2009 } \
2010 } while (0)
2011
2012
2013 /*
2014 * Deal with variably sized requests first.
2015 */
2016 int rc = VINF_SUCCESS;
2017 if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_VMMREQUEST(0)))
2018 {
2019 CHECKRET_MIN_SIZE("VMMREQUEST", sizeof(VMMDevRequestHeader));
2020 rc = VBoxGuestCommonIOCtl_VMMRequest(pDevExt, pSession, (VMMDevRequestHeader *)pvData, cbData, pcbDataReturned);
2021 }
2022#ifdef VBOX_WITH_HGCM
2023 /*
2024 * These ones are a bit tricky.
2025 */
2026 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL(0)))
2027 {
2028 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2029 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2030 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2031 fInterruptible, false /*f32bit*/,
2032 0, cbData, pcbDataReturned);
2033 }
2034 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED(0)))
2035 {
2036 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2037 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2038 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2039 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2040 false /*f32bit*/,
2041 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2042 }
2043# ifdef RT_ARCH_AMD64
2044 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_32(0)))
2045 {
2046 CHECKRET_MIN_SIZE("HGCM_CALL", sizeof(VBoxGuestHGCMCallInfo));
2047 bool fInterruptible = pSession->R0Process != NIL_RTR0PROCESS;
2048 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, (VBoxGuestHGCMCallInfo *)pvData, RT_INDEFINITE_WAIT,
2049 fInterruptible, true /*f32bit*/,
2050 0, cbData, pcbDataReturned);
2051 }
2052 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_HGCM_CALL_TIMED_32(0)))
2053 {
2054 CHECKRET_MIN_SIZE("HGCM_CALL_TIMED", sizeof(VBoxGuestHGCMCallInfoTimed));
2055 VBoxGuestHGCMCallInfoTimed *pInfo = (VBoxGuestHGCMCallInfoTimed *)pvData;
2056 rc = VBoxGuestCommonIOCtl_HGCMCall(pDevExt, pSession, &pInfo->info, pInfo->u32Timeout,
2057 !!pInfo->fInterruptible || pSession->R0Process != NIL_RTR0PROCESS,
2058 true /*f32bit*/,
2059 RT_OFFSETOF(VBoxGuestHGCMCallInfoTimed, info), cbData, pcbDataReturned);
2060 }
2061# endif
2062#endif /* VBOX_WITH_HGCM */
2063 else if (VBOXGUEST_IOCTL_STRIP_SIZE(iFunction) == VBOXGUEST_IOCTL_STRIP_SIZE(VBOXGUEST_IOCTL_LOG(0)))
2064 {
2065 CHECKRET_MIN_SIZE("LOG", 1);
2066 rc = VBoxGuestCommonIOCtl_Log((char *)pvData, cbData, pcbDataReturned);
2067 }
2068 else
2069 {
2070 switch (iFunction)
2071 {
2072 case VBOXGUEST_IOCTL_GETVMMDEVPORT:
2073 CHECKRET_RING0("GETVMMDEVPORT");
2074 CHECKRET_MIN_SIZE("GETVMMDEVPORT", sizeof(VBoxGuestPortInfo));
2075 rc = VBoxGuestCommonIOCtl_GetVMMDevPort(pDevExt, (VBoxGuestPortInfo *)pvData, pcbDataReturned);
2076 break;
2077
2078 case VBOXGUEST_IOCTL_WAITEVENT:
2079 CHECKRET_MIN_SIZE("WAITEVENT", sizeof(VBoxGuestWaitEventInfo));
2080 rc = VBoxGuestCommonIOCtl_WaitEvent(pDevExt, pSession, (VBoxGuestWaitEventInfo *)pvData,
2081 pcbDataReturned, pSession->R0Process != NIL_RTR0PROCESS);
2082 break;
2083
2084 case VBOXGUEST_IOCTL_CANCEL_ALL_WAITEVENTS:
2085 if (cbData != 0)
2086 rc = VERR_INVALID_PARAMETER;
2087 rc = VBoxGuestCommonIOCtl_CancelAllWaitEvents(pDevExt, pSession);
2088 break;
2089
2090 case VBOXGUEST_IOCTL_CTL_FILTER_MASK:
2091 CHECKRET_MIN_SIZE("CTL_FILTER_MASK", sizeof(VBoxGuestFilterMaskInfo));
2092 rc = VBoxGuestCommonIOCtl_CtlFilterMask(pDevExt, (VBoxGuestFilterMaskInfo *)pvData);
2093 break;
2094
2095#ifdef VBOX_WITH_HGCM
2096 case VBOXGUEST_IOCTL_HGCM_CONNECT:
2097# ifdef RT_ARCH_AMD64
2098 case VBOXGUEST_IOCTL_HGCM_CONNECT_32:
2099# endif
2100 CHECKRET_MIN_SIZE("HGCM_CONNECT", sizeof(VBoxGuestHGCMConnectInfo));
2101 rc = VBoxGuestCommonIOCtl_HGCMConnect(pDevExt, pSession, (VBoxGuestHGCMConnectInfo *)pvData, pcbDataReturned);
2102 break;
2103
2104 case VBOXGUEST_IOCTL_HGCM_DISCONNECT:
2105# ifdef RT_ARCH_AMD64
2106 case VBOXGUEST_IOCTL_HGCM_DISCONNECT_32:
2107# endif
2108 CHECKRET_MIN_SIZE("HGCM_DISCONNECT", sizeof(VBoxGuestHGCMDisconnectInfo));
2109 rc = VBoxGuestCommonIOCtl_HGCMDisconnect(pDevExt, pSession, (VBoxGuestHGCMDisconnectInfo *)pvData, pcbDataReturned);
2110 break;
2111
2112 case VBOXGUEST_IOCTL_CLIPBOARD_CONNECT:
2113 CHECKRET_MIN_SIZE("CLIPBOARD_CONNECT", sizeof(uint32_t));
2114 rc = VBoxGuestCommonIOCtl_HGCMClipboardReConnect(pDevExt, (uint32_t *)pvData, pcbDataReturned);
2115 break;
2116#endif /* VBOX_WITH_HGCM */
2117
2118 case VBOXGUEST_IOCTL_CHECK_BALLOON:
2119 CHECKRET_MIN_SIZE("CHECK_MEMORY_BALLOON", sizeof(VBoxGuestCheckBalloonInfo));
2120 rc = VBoxGuestCommonIOCtl_QueryMemoryBalloon(pDevExt, pSession, (VBoxGuestCheckBalloonInfo *)pvData, pcbDataReturned);
2121 break;
2122
2123 case VBOXGUEST_IOCTL_CHANGE_BALLOON:
2124 CHECKRET_MIN_SIZE("CHANGE_MEMORY_BALLOON", sizeof(VBoxGuestChangeBalloonInfo));
2125 rc = VBoxGuestCommonIOCtl_ChangeMemoryBalloon(pDevExt, pSession, (VBoxGuestChangeBalloonInfo *)pvData, pcbDataReturned);
2126 break;
2127
2128 default:
2129 {
2130 LogRel(("VBoxGuestCommonIOCtl: Unknown request iFunction=%#x Stripped size=%#x\n", iFunction,
2131 VBOXGUEST_IOCTL_STRIP_SIZE(iFunction)));
2132 rc = VERR_NOT_SUPPORTED;
2133 break;
2134 }
2135 }
2136 }
2137
2138 Log(("VBoxGuestCommonIOCtl: returns %Rrc *pcbDataReturned=%zu\n", rc, pcbDataReturned ? *pcbDataReturned : 0));
2139 return rc;
2140}
2141
2142
2143
/**
 * Common interrupt service routine.
 *
 * This deals with events and with waking up threads waiting for those events.
 *
 * @returns true if it was our interrupt, false if it wasn't.
 * @param   pDevExt     The VBoxGuest device extension.
 */
bool VBoxGuestCommonISR(PVBOXGUESTDEVEXT pDevExt)
{
    bool fMousePositionChanged = false;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
    VMMDevEvents volatile *pReq = pDevExt->pIrqAckEvents;
    int rc = 0; /* accumulates RTSemEventMultiSignal statuses; asserted to be 0 at the end */
    bool fOurIrq;

    /*
     * Make sure we've initialized the device extension.
     */
    if (RT_UNLIKELY(!pReq))
        return false;

    /*
     * Enter the spinlock and check if it's our IRQ or not.
     *
     * Note! Solaris cannot do RTSemEventMultiSignal with interrupts disabled
     * so we're entering the spinlock without disabling them. This works
     * fine as long as we're never called in a nested fashion.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockAcquire(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockAcquireNoInts(pDevExt->EventSpinlock, &Tmp);
#endif
    fOurIrq = pDevExt->pVMMDevMemory->V.V1_04.fHaveEvents;
    if (fOurIrq)
    {
        /*
         * Acknowledge events.
         * We don't use VbglGRPerform here as it may take another spinlocks.
         * Instead the pre-allocated ack request is submitted directly by
         * writing its physical address to the VMMDev request port; the
         * barriers keep the compiler from reordering around the port write.
         */
        pReq->header.rc = VERR_INTERNAL_ERROR;
        pReq->events = 0;
        ASMCompilerBarrier();
        ASMOutU32(pDevExt->IOPortBase + VMMDEV_PORT_OFF_REQUEST, (uint32_t)pDevExt->PhysIrqAckEvents);
        ASMCompilerBarrier(); /* paranoia */
        if (RT_SUCCESS(pReq->header.rc))
        {
            uint32_t fEvents = pReq->events;
            PVBOXGUESTWAIT pWait;

            Log(("VBoxGuestCommonISR: acknowledge events succeeded %#RX32\n", fEvents));

            /*
             * VMMDEV_EVENT_MOUSE_POSITION_CHANGED can only be polled for;
             * it is handled after the spinlock is released (see below).
             */
            if (fEvents & VMMDEV_EVENT_MOUSE_POSITION_CHANGED)
            {
                fMousePositionChanged = true;
                fEvents &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
            }

#ifdef VBOX_WITH_HGCM
            /*
             * The HGCM event/list is kind of different in that we evaluate all entries,
             * waking every waiter whose request the host has marked done.
             */
            if (fEvents & VMMDEV_EVENT_HGCM)
            {
                for (pWait = pDevExt->HGCMWaitList.pHead; pWait; pWait = pWait->pNext)
                    if ( !pWait->fResEvents
                        && (pWait->pHGCMReq->fu32Flags & VBOX_HGCM_REQ_DONE))
                    {
                        pWait->fResEvents = VMMDEV_EVENT_HGCM;
                        rc |= RTSemEventMultiSignal(pWait->Event);
                    }
                fEvents &= ~VMMDEV_EVENT_HGCM;
            }
#endif

            /*
             * Normal FIFO waiter evaluation.  Each event bit is consumed by
             * at most one waiter (first match wins); leftovers are stored in
             * f32PendingEvents for later consumers.
             */
            fEvents |= pDevExt->f32PendingEvents;
            for (pWait = pDevExt->WaitList.pHead; pWait; pWait = pWait->pNext)
                if ( (pWait->fReqEvents & fEvents)
                    && !pWait->fResEvents)
                {
                    pWait->fResEvents = pWait->fReqEvents & fEvents;
                    fEvents &= ~pWait->fResEvents;
                    rc |= RTSemEventMultiSignal(pWait->Event);
                    if (!fEvents)
                        break;
                }
            ASMAtomicWriteU32(&pDevExt->f32PendingEvents, fEvents);
        }
        else /* something is seriously wrong... */
            Log(("VBoxGuestCommonISR: acknowledge events failed rc=%d (events=%#x)!!\n",
                 pReq->header.rc, pReq->events));
    }
    else
        LogFlow(("VBoxGuestCommonISR: not ours\n"));

    /*
     * Work the poll and async notification queues on OSes that implement that.
     * Do this outside the spinlock to prevent some recursive spinlocking.
     */
#if defined(RT_OS_SOLARIS)
    RTSpinlockRelease(pDevExt->EventSpinlock, &Tmp);
#else
    RTSpinlockReleaseNoInts(pDevExt->EventSpinlock, &Tmp);
#endif

    if (fMousePositionChanged)
    {
        ASMAtomicIncU32(&pDevExt->u32MousePosChangedSeq);
        VBoxGuestNativeISRMousePollEvent(pDevExt);
    }

    Assert(rc == 0);
    return fOurIrq;
}
2265
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette